stefanp created this revision.
stefanp added reviewers: nemanjai, lei.
Herald added subscribers: shchenz, kbarton.
stefanp requested review of this revision.
Herald added a project: clang.

Added a number of builtins for compatibility with the XL compiler. Most of
these builtins already exist in clang under a different name.
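
As a quick illustration (not part of the patch itself), several of the new
builtins lower to the same LLVM intrinsics as existing clang builtins. A
minimal sketch, assuming this patch is applied: __builtin_rotateleft32 is the
pre-existing clang builtin, __rotatel4 is the XL-compatible name added here,
and both emit @llvm.fshl.i32 in the generated IR.

  unsigned int rotate_both(unsigned int x, unsigned int s) {
    // Both calls lower to @llvm.fshl.i32, so the XOR should fold to zero.
    return __rotatel4(x, s) ^ __builtin_rotateleft32(x, s);
  }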


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D104386

Files:
  clang/include/clang/Basic/BuiltinsPPC.def
  clang/lib/Basic/Targets/PPC.cpp
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/builtins-ppc-xlcompat-abs.c
  clang/test/CodeGen/builtins-ppc-xlcompat-alloca.c
  clang/test/CodeGen/builtins-ppc-xlcompat-bpermd.c
  clang/test/CodeGen/builtins-ppc-xlcompat-cipher.c
  clang/test/CodeGen/builtins-ppc-xlcompat-cmplx.c
  clang/test/CodeGen/builtins-ppc-xlcompat-darn.c
  clang/test/CodeGen/builtins-ppc-xlcompat-div.c
  clang/test/CodeGen/builtins-ppc-xlcompat-expect.c
  clang/test/CodeGen/builtins-ppc-xlcompat-fma.c
  clang/test/CodeGen/builtins-ppc-xlcompat-memory.c
  clang/test/CodeGen/builtins-ppc-xlcompat-mode.c
  clang/test/CodeGen/builtins-ppc-xlcompat-popcnt.c
  clang/test/CodeGen/builtins-ppc-xlcompat-rotate.c

Index: clang/test/CodeGen/builtins-ppc-xlcompat-rotate.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-rotate.c
@@ -0,0 +1,175 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -D__ppc64__ \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE64
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -D__ppc64__ \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE64
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE32
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE32
+
+// BE64-LABEL: @testrotatel4(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[RS_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    store i32 [[RS:%.*]], i32* [[RS_ADDR]], align 4
+// BE64-NEXT:    store i32 [[SHIFT:%.*]], i32* [[SHIFT_ADDR]], align 4
+// BE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS_ADDR]], align 4
+// BE64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// BE64-NEXT:    [[TMP2:%.*]] = call i32 @llvm.fshl.i32(i32 [[TMP0]], i32 [[TMP0]], i32 [[TMP1]])
+// BE64-NEXT:    ret i32 [[TMP2]]
+//
+// LE64-LABEL: @testrotatel4(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[RS_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    store i32 [[RS:%.*]], i32* [[RS_ADDR]], align 4
+// LE64-NEXT:    store i32 [[SHIFT:%.*]], i32* [[SHIFT_ADDR]], align 4
+// LE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS_ADDR]], align 4
+// LE64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// LE64-NEXT:    [[TMP2:%.*]] = call i32 @llvm.fshl.i32(i32 [[TMP0]], i32 [[TMP0]], i32 [[TMP1]])
+// LE64-NEXT:    ret i32 [[TMP2]]
+//
+// BE32-LABEL: @testrotatel4(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[RS_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i32 [[RS:%.*]], i32* [[RS_ADDR]], align 4
+// BE32-NEXT:    store i32 [[SHIFT:%.*]], i32* [[SHIFT_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// BE32-NEXT:    [[TMP2:%.*]] = call i32 @llvm.fshl.i32(i32 [[TMP0]], i32 [[TMP0]], i32 [[TMP1]])
+// BE32-NEXT:    ret i32 [[TMP2]]
+//
+// LE32-LABEL: @testrotatel4(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[RS_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i32 [[RS:%.*]], i32* [[RS_ADDR]], align 4
+// LE32-NEXT:    store i32 [[SHIFT:%.*]], i32* [[SHIFT_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// LE32-NEXT:    [[TMP2:%.*]] = call i32 @llvm.fshl.i32(i32 [[TMP0]], i32 [[TMP0]], i32 [[TMP1]])
+// LE32-NEXT:    ret i32 [[TMP2]]
+//
+unsigned int testrotatel4(unsigned int rs, unsigned int shift) {
+  return __rotatel4(rs, shift);
+}
+
+// BE64-LABEL: @testrotatel8(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[RS_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i64 [[RS:%.*]], i64* [[RS_ADDR]], align 8
+// BE64-NEXT:    store i64 [[SHIFT:%.*]], i64* [[SHIFT_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[SHIFT_ADDR]], align 8
+// BE64-NEXT:    [[TMP2:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP0]], i64 [[TMP0]], i64 [[TMP1]])
+// BE64-NEXT:    ret i64 [[TMP2]]
+//
+// LE64-LABEL: @testrotatel8(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[RS_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i64 [[RS:%.*]], i64* [[RS_ADDR]], align 8
+// LE64-NEXT:    store i64 [[SHIFT:%.*]], i64* [[SHIFT_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[SHIFT_ADDR]], align 8
+// LE64-NEXT:    [[TMP2:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP0]], i64 [[TMP0]], i64 [[TMP1]])
+// LE64-NEXT:    ret i64 [[TMP2]]
+//
+// BE32-LABEL: @testrotatel8(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[RS_ADDR:%.*]] = alloca i64, align 8
+// BE32-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i64, align 8
+// BE32-NEXT:    store i64 [[RS:%.*]], i64* [[RS_ADDR]], align 8
+// BE32-NEXT:    store i64 [[SHIFT:%.*]], i64* [[SHIFT_ADDR]], align 8
+// BE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// BE32-NEXT:    [[TMP1:%.*]] = load i64, i64* [[SHIFT_ADDR]], align 8
+// BE32-NEXT:    [[TMP2:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP0]], i64 [[TMP0]], i64 [[TMP1]])
+// BE32-NEXT:    ret i64 [[TMP2]]
+//
+// LE32-LABEL: @testrotatel8(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[RS_ADDR:%.*]] = alloca i64, align 8
+// LE32-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i64, align 8
+// LE32-NEXT:    store i64 [[RS:%.*]], i64* [[RS_ADDR]], align 8
+// LE32-NEXT:    store i64 [[SHIFT:%.*]], i64* [[SHIFT_ADDR]], align 8
+// LE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// LE32-NEXT:    [[TMP1:%.*]] = load i64, i64* [[SHIFT_ADDR]], align 8
+// LE32-NEXT:    [[TMP2:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP0]], i64 [[TMP0]], i64 [[TMP1]])
+// LE32-NEXT:    ret i64 [[TMP2]]
+//
+unsigned long long testrotatel8(unsigned long long rs, unsigned long long shift) {
+  return __rotatel8(rs, shift);
+}
+
+// BE64-LABEL: @testrdlam(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[RS_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    store i64 [[RS:%.*]], i64* [[RS_ADDR]], align 8
+// BE64-NEXT:    store i32 [[SHIFT:%.*]], i32* [[SHIFT_ADDR]], align 4
+// BE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// BE64-NEXT:    [[CONV:%.*]] = zext i32 [[TMP1]] to i64
+// BE64-NEXT:    [[TMP2:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// BE64-NEXT:    [[TMP3:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// BE64-NEXT:    [[CONV1:%.*]] = zext i32 [[TMP3]] to i64
+// BE64-NEXT:    [[TMP4:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP2]], i64 [[TMP2]], i64 [[CONV1]])
+// BE64-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], 7
+// BE64-NEXT:    ret i64 [[TMP5]]
+//
+// LE64-LABEL: @testrdlam(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[RS_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    store i64 [[RS:%.*]], i64* [[RS_ADDR]], align 8
+// LE64-NEXT:    store i32 [[SHIFT:%.*]], i32* [[SHIFT_ADDR]], align 4
+// LE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// LE64-NEXT:    [[CONV:%.*]] = zext i32 [[TMP1]] to i64
+// LE64-NEXT:    [[TMP2:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// LE64-NEXT:    [[TMP3:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// LE64-NEXT:    [[CONV1:%.*]] = zext i32 [[TMP3]] to i64
+// LE64-NEXT:    [[TMP4:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP2]], i64 [[TMP2]], i64 [[CONV1]])
+// LE64-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], 7
+// LE64-NEXT:    ret i64 [[TMP5]]
+//
+// BE32-LABEL: @testrdlam(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[RS_ADDR:%.*]] = alloca i64, align 8
+// BE32-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i64 [[RS:%.*]], i64* [[RS_ADDR]], align 8
+// BE32-NEXT:    store i32 [[SHIFT:%.*]], i32* [[SHIFT_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// BE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// BE32-NEXT:    [[CONV:%.*]] = zext i32 [[TMP1]] to i64
+// BE32-NEXT:    [[TMP2:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// BE32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// BE32-NEXT:    [[CONV1:%.*]] = zext i32 [[TMP3]] to i64
+// BE32-NEXT:    [[TMP4:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP2]], i64 [[TMP2]], i64 [[CONV1]])
+// BE32-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], 7
+// BE32-NEXT:    ret i64 [[TMP5]]
+//
+// LE32-LABEL: @testrdlam(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[RS_ADDR:%.*]] = alloca i64, align 8
+// LE32-NEXT:    [[SHIFT_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i64 [[RS:%.*]], i64* [[RS_ADDR]], align 8
+// LE32-NEXT:    store i32 [[SHIFT:%.*]], i32* [[SHIFT_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// LE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// LE32-NEXT:    [[CONV:%.*]] = zext i32 [[TMP1]] to i64
+// LE32-NEXT:    [[TMP2:%.*]] = load i64, i64* [[RS_ADDR]], align 8
+// LE32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[SHIFT_ADDR]], align 4
+// LE32-NEXT:    [[CONV1:%.*]] = zext i32 [[TMP3]] to i64
+// LE32-NEXT:    [[TMP4:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP2]], i64 [[TMP2]], i64 [[CONV1]])
+// LE32-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], 7
+// LE32-NEXT:    ret i64 [[TMP5]]
+//
+unsigned long long testrdlam(unsigned long long rs, unsigned int shift) {
+  // The third parameter is a mask that must be a constant representing a
+  // contiguous bit field.
+  return __rdlam(rs, shift, 7);
+}
Index: clang/test/CodeGen/builtins-ppc-xlcompat-popcnt.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-popcnt.c
@@ -0,0 +1,237 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -D__ppc64__ \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE64
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -D__ppc64__ \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE64
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE32
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE32
+
+// BE64-LABEL: @testcntlz4(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// BE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// BE64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
+// BE64-NEXT:    ret i32 [[TMP1]]
+//
+// LE64-LABEL: @testcntlz4(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// LE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// LE64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
+// LE64-NEXT:    ret i32 [[TMP1]]
+//
+// BE32-LABEL: @testcntlz4(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
+// BE32-NEXT:    ret i32 [[TMP1]]
+//
+// LE32-LABEL: @testcntlz4(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
+// LE32-NEXT:    ret i32 [[TMP1]]
+//
+unsigned int testcntlz4(unsigned int value) {
+  return __cntlz4(value);
+}
+
+// BE64-LABEL: @testcntlz8(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[TMP0]], i1 false)
+// BE64-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// BE64-NEXT:    ret i32 [[CAST]]
+//
+// LE64-LABEL: @testcntlz8(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[TMP0]], i1 false)
+// LE64-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// LE64-NEXT:    ret i32 [[CAST]]
+//
+// BE32-LABEL: @testcntlz8(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// BE32-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// BE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// BE32-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[TMP0]], i1 false)
+// BE32-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// BE32-NEXT:    ret i32 [[CAST]]
+//
+// LE32-LABEL: @testcntlz8(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// LE32-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// LE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// LE32-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[TMP0]], i1 false)
+// LE32-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// LE32-NEXT:    ret i32 [[CAST]]
+//
+unsigned int testcntlz8(unsigned long long value) {
+  return __cntlz8(value);
+}
+
+// BE64-LABEL: @testcnttz4(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// BE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// BE64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 false)
+// BE64-NEXT:    ret i32 [[TMP1]]
+//
+// LE64-LABEL: @testcnttz4(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// LE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// LE64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 false)
+// LE64-NEXT:    ret i32 [[TMP1]]
+//
+// BE32-LABEL: @testcnttz4(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 false)
+// BE32-NEXT:    ret i32 [[TMP1]]
+//
+// LE32-LABEL: @testcnttz4(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 false)
+// LE32-NEXT:    ret i32 [[TMP1]]
+//
+unsigned int testcnttz4(unsigned int value) {
+  return __cnttz4(value);
+}
+
+// BE64-LABEL: @testcnttz8(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.cttz.i64(i64 [[TMP0]], i1 false)
+// BE64-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// BE64-NEXT:    ret i32 [[CAST]]
+//
+// LE64-LABEL: @testcnttz8(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.cttz.i64(i64 [[TMP0]], i1 false)
+// LE64-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// LE64-NEXT:    ret i32 [[CAST]]
+//
+// BE32-LABEL: @testcnttz8(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// BE32-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// BE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// BE32-NEXT:    [[TMP1:%.*]] = call i64 @llvm.cttz.i64(i64 [[TMP0]], i1 false)
+// BE32-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// BE32-NEXT:    ret i32 [[CAST]]
+//
+// LE32-LABEL: @testcnttz8(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// LE32-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// LE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// LE32-NEXT:    [[TMP1:%.*]] = call i64 @llvm.cttz.i64(i64 [[TMP0]], i1 false)
+// LE32-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// LE32-NEXT:    ret i32 [[CAST]]
+//
+unsigned int testcnttz8(unsigned long long value) {
+  return __cnttz8(value);
+}
+
+// BE64-LABEL: @testpopcnt4(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// BE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// BE64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctpop.i32(i32 [[TMP0]])
+// BE64-NEXT:    ret i32 [[TMP1]]
+//
+// LE64-LABEL: @testpopcnt4(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// LE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// LE64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctpop.i32(i32 [[TMP0]])
+// LE64-NEXT:    ret i32 [[TMP1]]
+//
+// BE32-LABEL: @testpopcnt4(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctpop.i32(i32 [[TMP0]])
+// BE32-NEXT:    ret i32 [[TMP1]]
+//
+// LE32-LABEL: @testpopcnt4(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i32 [[VALUE:%.*]], i32* [[VALUE_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[VALUE_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctpop.i32(i32 [[TMP0]])
+// LE32-NEXT:    ret i32 [[TMP1]]
+//
+int testpopcnt4(unsigned int value) {
+  return __popcnt4(value);
+}
+
+// BE64-LABEL: @testpopcnt8(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP0]])
+// BE64-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// BE64-NEXT:    ret i32 [[CAST]]
+//
+// LE64-LABEL: @testpopcnt8(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP0]])
+// LE64-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// LE64-NEXT:    ret i32 [[CAST]]
+//
+// BE32-LABEL: @testpopcnt8(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// BE32-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// BE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// BE32-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP0]])
+// BE32-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// BE32-NEXT:    ret i32 [[CAST]]
+//
+// LE32-LABEL: @testpopcnt8(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[VALUE_ADDR:%.*]] = alloca i64, align 8
+// LE32-NEXT:    store i64 [[VALUE:%.*]], i64* [[VALUE_ADDR]], align 8
+// LE32-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VALUE_ADDR]], align 8
+// LE32-NEXT:    [[TMP1:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP0]])
+// LE32-NEXT:    [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// LE32-NEXT:    ret i32 [[CAST]]
+//
+int testpopcnt8(unsigned long long value) {
+  return __popcnt8(value);
+}
Index: clang/test/CodeGen/builtins-ppc-xlcompat-mode.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-mode.c
@@ -0,0 +1,42 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+
+// CHECK-LABEL: @testreadflm(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.ppc.readflm()
+// CHECK-NEXT:    ret double [[TMP0]]
+//
+double testreadflm(void) {
+  return __readflm();
+}
+
+// CHECK-LABEL: @testsetflm(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[A:%.*]], double* [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.ppc.setflm(double [[TMP0]])
+// CHECK-NEXT:    ret double [[TMP1]]
+//
+double testsetflm(double a) {
+  return __setflm(a);
+}
+
+// CHECK-LABEL: @testsetrnd(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[MODE_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32 [[MODE:%.*]], i32* [[MODE_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[MODE_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.ppc.setrnd(i32 [[TMP0]])
+// CHECK-NEXT:    ret double [[TMP1]]
+//
+double testsetrnd(int mode) {
+  return __setrnd(mode);
+}
Index: clang/test/CodeGen/builtins-ppc-xlcompat-memory.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-memory.c
@@ -0,0 +1,251 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang --target=powerpc64-gnu-unknown -D__ppc64__ \
+// RUN:    -S -emit-llvm %s -o -  -mcpu=pwr8 | FileCheck %s --check-prefix=BE64
+// RUN: %clang --target=powerpc64le-gnu-unknown -D__ppc64__ \
+// RUN:   -S -emit-llvm %s -o -  -mcpu=pwr8 | FileCheck %s --check-prefix=LE64
+// RUN: %clang --target=powerpc-gnu-unknown \
+// RUN:    -S -emit-llvm %s -o -  -mcpu=pwr8 | FileCheck %s --check-prefix=BE32
+// RUN: %clang --target=powerpcle-gnu-unknown \
+// RUN:   -S -emit-llvm %s -o -  -mcpu=pwr8 | FileCheck %s --check-prefix=LE32
+
+// No headers are included in this test, to avoid any dependencies on the system.
+// Required for size_t. Usually found in stddef.h.
+typedef __SIZE_TYPE__ size_t;
+
+// Required for bzero and bcopy. Usually in strings.h.
+extern void bcopy(const void *__src, void *__dest, size_t __n);
+extern void bzero(void *__s, size_t __n);
+
+// BE64-LABEL: @testalignx(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[POINTER_ADDR:%.*]] = alloca i8*, align 8
+// BE64-NEXT:    store i8* [[POINTER:%.*]], i8** [[POINTER_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[POINTER_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[POINTER_ADDR]], align 8
+// BE64-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 16) ]
+// BE64-NEXT:    ret void
+//
+// LE64-LABEL: @testalignx(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[POINTER_ADDR:%.*]] = alloca i8*, align 8
+// LE64-NEXT:    store i8* [[POINTER:%.*]], i8** [[POINTER_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[POINTER_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[POINTER_ADDR]], align 8
+// LE64-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 16) ]
+// LE64-NEXT:    ret void
+//
+// BE32-LABEL: @testalignx(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[POINTER_ADDR:%.*]] = alloca i8*, align 4
+// BE32-NEXT:    store i8* [[POINTER:%.*]], i8** [[POINTER_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[POINTER_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[POINTER_ADDR]], align 4
+// BE32-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i32 16) ]
+// BE32-NEXT:    ret void
+//
+// LE32-LABEL: @testalignx(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[POINTER_ADDR:%.*]] = alloca i8*, align 4
+// LE32-NEXT:    store i8* [[POINTER:%.*]], i8** [[POINTER_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[POINTER_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[POINTER_ADDR]], align 4
+// LE32-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i32 16) ]
+// LE32-NEXT:    ret void
+//
+void testalignx(const void *pointer) {
+  __alignx(16, pointer);
+}
+
+// BE64-LABEL: @testbcopy(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[SRC_ADDR:%.*]] = alloca i8*, align 8
+// BE64-NEXT:    [[DEST_ADDR:%.*]] = alloca i8*, align 8
+// BE64-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i8* [[SRC:%.*]], i8** [[SRC_ADDR]], align 8
+// BE64-NEXT:    store i8* [[DEST:%.*]], i8** [[DEST_ADDR]], align 8
+// BE64-NEXT:    store i64 [[N:%.*]], i64* [[N_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[SRC_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DEST_ADDR]], align 8
+// BE64-NEXT:    [[TMP2:%.*]] = load i64, i64* [[N_ADDR]], align 8
+// BE64-NEXT:    call void @bcopy(i8* [[TMP0]], i8* [[TMP1]], i64 [[TMP2]])
+// BE64-NEXT:    ret void
+//
+// LE64-LABEL: @testbcopy(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[SRC_ADDR:%.*]] = alloca i8*, align 8
+// LE64-NEXT:    [[DEST_ADDR:%.*]] = alloca i8*, align 8
+// LE64-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i8* [[SRC:%.*]], i8** [[SRC_ADDR]], align 8
+// LE64-NEXT:    store i8* [[DEST:%.*]], i8** [[DEST_ADDR]], align 8
+// LE64-NEXT:    store i64 [[N:%.*]], i64* [[N_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[SRC_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DEST_ADDR]], align 8
+// LE64-NEXT:    [[TMP2:%.*]] = load i64, i64* [[N_ADDR]], align 8
+// LE64-NEXT:    call void @bcopy(i8* [[TMP0]], i8* [[TMP1]], i64 [[TMP2]])
+// LE64-NEXT:    ret void
+//
+// BE32-LABEL: @testbcopy(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[SRC_ADDR:%.*]] = alloca i8*, align 4
+// BE32-NEXT:    [[DEST_ADDR:%.*]] = alloca i8*, align 4
+// BE32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i8* [[SRC:%.*]], i8** [[SRC_ADDR]], align 4
+// BE32-NEXT:    store i8* [[DEST:%.*]], i8** [[DEST_ADDR]], align 4
+// BE32-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[SRC_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DEST_ADDR]], align 4
+// BE32-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
+// BE32-NEXT:    call void @bcopy(i8* [[TMP0]], i8* [[TMP1]], i32 [[TMP2]])
+// BE32-NEXT:    ret void
+//
+// LE32-LABEL: @testbcopy(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[SRC_ADDR:%.*]] = alloca i8*, align 4
+// LE32-NEXT:    [[DEST_ADDR:%.*]] = alloca i8*, align 4
+// LE32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i8* [[SRC:%.*]], i8** [[SRC_ADDR]], align 4
+// LE32-NEXT:    store i8* [[DEST:%.*]], i8** [[DEST_ADDR]], align 4
+// LE32-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[SRC_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DEST_ADDR]], align 4
+// LE32-NEXT:    [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
+// LE32-NEXT:    call void @bcopy(i8* [[TMP0]], i8* [[TMP1]], i32 [[TMP2]])
+// LE32-NEXT:    ret void
+//
+void testbcopy(const void *src, void *dest, size_t n) {
+  __bcopy(src, dest, n);
+}
+
+// BE64-LABEL: @testbzero(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[S_ADDR:%.*]] = alloca i8*, align 8
+// BE64-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i8* [[S:%.*]], i8** [[S_ADDR]], align 8
+// BE64-NEXT:    store i64 [[N:%.*]], i64* [[N_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[S_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_ADDR]], align 8
+// BE64-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP0]], i8 0, i64 [[TMP1]], i1 false)
+// BE64-NEXT:    ret void
+//
+// LE64-LABEL: @testbzero(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[S_ADDR:%.*]] = alloca i8*, align 8
+// LE64-NEXT:    [[N_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i8* [[S:%.*]], i8** [[S_ADDR]], align 8
+// LE64-NEXT:    store i64 [[N:%.*]], i64* [[N_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[S_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[N_ADDR]], align 8
+// LE64-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP0]], i8 0, i64 [[TMP1]], i1 false)
+// LE64-NEXT:    ret void
+//
+// BE32-LABEL: @testbzero(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[S_ADDR:%.*]] = alloca i8*, align 4
+// BE32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i8* [[S:%.*]], i8** [[S_ADDR]], align 4
+// BE32-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[S_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
+// BE32-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[TMP0]], i8 0, i32 [[TMP1]], i1 false)
+// BE32-NEXT:    ret void
+//
+// LE32-LABEL: @testbzero(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[S_ADDR:%.*]] = alloca i8*, align 4
+// LE32-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i8* [[S:%.*]], i8** [[S_ADDR]], align 4
+// LE32-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[S_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
+// LE32-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[TMP0]], i8 0, i32 [[TMP1]], i1 false)
+// LE32-NEXT:    ret void
+//
+void testbzero(void *s, size_t n) {
+  bzero(s, n);
+}
+
+// BE64-LABEL: @testdcbf(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[ADDR_ADDR:%.*]] = alloca i8*, align 8
+// BE64-NEXT:    store i8* [[ADDR:%.*]], i8** [[ADDR_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[ADDR_ADDR]], align 8
+// BE64-NEXT:    call void @llvm.ppc.dcbf(i8* [[TMP0]])
+// BE64-NEXT:    ret void
+//
+// LE64-LABEL: @testdcbf(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[ADDR_ADDR:%.*]] = alloca i8*, align 8
+// LE64-NEXT:    store i8* [[ADDR:%.*]], i8** [[ADDR_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[ADDR_ADDR]], align 8
+// LE64-NEXT:    call void @llvm.ppc.dcbf(i8* [[TMP0]])
+// LE64-NEXT:    ret void
+//
+// BE32-LABEL: @testdcbf(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[ADDR_ADDR:%.*]] = alloca i8*, align 4
+// BE32-NEXT:    store i8* [[ADDR:%.*]], i8** [[ADDR_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[ADDR_ADDR]], align 4
+// BE32-NEXT:    call void @llvm.ppc.dcbf(i8* [[TMP0]])
+// BE32-NEXT:    ret void
+//
+// LE32-LABEL: @testdcbf(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[ADDR_ADDR:%.*]] = alloca i8*, align 4
+// LE32-NEXT:    store i8* [[ADDR:%.*]], i8** [[ADDR_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[ADDR_ADDR]], align 4
+// LE32-NEXT:    call void @llvm.ppc.dcbf(i8* [[TMP0]])
+// LE32-NEXT:    ret void
+//
+void testdcbf(const void *addr) {
+  __dcbf(addr);
+}
+
+// BE64-LABEL: @testbuiltin_frame_address(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[TMP0:%.*]] = call i8* @llvm.frameaddress.p0i8(i32 7)
+// BE64-NEXT:    ret i8* [[TMP0]]
+//
+// LE64-LABEL: @testbuiltin_frame_address(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[TMP0:%.*]] = call i8* @llvm.frameaddress.p0i8(i32 7)
+// LE64-NEXT:    ret i8* [[TMP0]]
+//
+// BE32-LABEL: @testbuiltin_frame_address(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[TMP0:%.*]] = call i8* @llvm.frameaddress.p0i8(i32 7)
+// BE32-NEXT:    ret i8* [[TMP0]]
+//
+// LE32-LABEL: @testbuiltin_frame_address(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[TMP0:%.*]] = call i8* @llvm.frameaddress.p0i8(i32 7)
+// LE32-NEXT:    ret i8* [[TMP0]]
+//
+void *testbuiltin_frame_address() {
+  // The parameter is a constant in the range [0, 63].
+  return __builtin_frame_address(7);
+}
+
+// BE64-LABEL: @testbuiltin_return_address(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[TMP0:%.*]] = call i8* @llvm.returnaddress(i32 7)
+// BE64-NEXT:    ret i8* [[TMP0]]
+//
+// LE64-LABEL: @testbuiltin_return_address(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[TMP0:%.*]] = call i8* @llvm.returnaddress(i32 7)
+// LE64-NEXT:    ret i8* [[TMP0]]
+//
+// BE32-LABEL: @testbuiltin_return_address(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[TMP0:%.*]] = call i8* @llvm.returnaddress(i32 7)
+// BE32-NEXT:    ret i8* [[TMP0]]
+//
+// LE32-LABEL: @testbuiltin_return_address(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[TMP0:%.*]] = call i8* @llvm.returnaddress(i32 7)
+// LE32-NEXT:    ret i8* [[TMP0]]
+//
+void *testbuiltin_return_address() {
+  // The parameter is a constant in the range [0, 63].
+  return __builtin_return_address(7);
+}
Index: clang/test/CodeGen/builtins-ppc-xlcompat-fma.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-fma.c
@@ -0,0 +1,45 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-gnu-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc64le-gnu-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc-gnu-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+// RUN: %clang_cc1 -triple powerpcle-gnu-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+
+// CHECK-LABEL: @testfmadd(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[A:%.*]], double* [[A_ADDR]], align 8
+// CHECK-NEXT:    store double [[B:%.*]], double* [[B_ADDR]], align 8
+// CHECK-NEXT:    store double [[C:%.*]], double* [[C_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[B_ADDR]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = load double, double* [[C_ADDR]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = call double @llvm.fma.f64(double [[TMP0]], double [[TMP1]], double [[TMP2]])
+// CHECK-NEXT:    ret double [[TMP3]]
+//
+double testfmadd(double a, double b, double c) {
+  return __fmadd(a, b, c);
+}
+
+// CHECK-LABEL: @testfmadds(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    [[C_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    store float [[A:%.*]], float* [[A_ADDR]], align 4
+// CHECK-NEXT:    store float [[B:%.*]], float* [[B_ADDR]], align 4
+// CHECK-NEXT:    store float [[C:%.*]], float* [[C_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[B_ADDR]], align 4
+// CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[C_ADDR]], align 4
+// CHECK-NEXT:    [[TMP3:%.*]] = call float @llvm.fma.f32(float [[TMP0]], float [[TMP1]], float [[TMP2]])
+// CHECK-NEXT:    ret float [[TMP3]]
+//
+float testfmadds(float a, float b, float c) {
+  return __fmadds(a, b, c);
+}
Index: clang/test/CodeGen/builtins-ppc-xlcompat-expect.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-expect.c
@@ -0,0 +1,52 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE64
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE64
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE32
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE32
+
+// BE64-LABEL: @testbuiltin_expect(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[EXPRESSION_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i64 [[EXPRESSION:%.*]], i64* [[EXPRESSION_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[EXPRESSION_ADDR]], align 8
+// BE64-NEXT:    [[CMP:%.*]] = icmp eq i64 [[TMP0]], 23
+// BE64-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+// BE64-NEXT:    [[CONV1:%.*]] = sext i32 [[CONV]] to i64
+// BE64-NEXT:    ret i64 [[CONV1]]
+//
+// LE64-LABEL: @testbuiltin_expect(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[EXPRESSION_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i64 [[EXPRESSION:%.*]], i64* [[EXPRESSION_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[EXPRESSION_ADDR]], align 8
+// LE64-NEXT:    [[CMP:%.*]] = icmp eq i64 [[TMP0]], 23
+// LE64-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+// LE64-NEXT:    [[CONV1:%.*]] = sext i32 [[CONV]] to i64
+// LE64-NEXT:    ret i64 [[CONV1]]
+//
+// BE32-LABEL: @testbuiltin_expect(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[EXPRESSION_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i32 [[EXPRESSION:%.*]], i32* [[EXPRESSION_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[EXPRESSION_ADDR]], align 4
+// BE32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP0]], 23
+// BE32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+// BE32-NEXT:    ret i32 [[CONV]]
+//
+// LE32-LABEL: @testbuiltin_expect(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[EXPRESSION_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i32 [[EXPRESSION:%.*]], i32* [[EXPRESSION_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[EXPRESSION_ADDR]], align 4
+// LE32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP0]], 23
+// LE32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+// LE32-NEXT:    ret i32 [[CONV]]
+//
+long testbuiltin_expect(long expression) {
+  // The second parameter is a long constant.
+  return __builtin_expect(expression, 23) == 23;
+}
Index: clang/test/CodeGen/builtins-ppc-xlcompat-div.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-div.c
@@ -0,0 +1,159 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -D__ppc64__ \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE64
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -D__ppc64__ \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE64
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE32
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE32
+
+#ifdef __ppc64__
+// BE64-LABEL: @testdivde(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i64 [[DIVIDEND:%.*]], i64* [[DIVIDEND_ADDR]], align 8
+// BE64-NEXT:    store i64 [[DIVISOR:%.*]], i64* [[DIVISOR_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DIVIDEND_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DIVISOR_ADDR]], align 8
+// BE64-NEXT:    [[TMP2:%.*]] = call i64 @llvm.ppc.divde(i64 [[TMP0]], i64 [[TMP1]])
+// BE64-NEXT:    ret i64 [[TMP2]]
+//
+// LE64-LABEL: @testdivde(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i64 [[DIVIDEND:%.*]], i64* [[DIVIDEND_ADDR]], align 8
+// LE64-NEXT:    store i64 [[DIVISOR:%.*]], i64* [[DIVISOR_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DIVIDEND_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DIVISOR_ADDR]], align 8
+// LE64-NEXT:    [[TMP2:%.*]] = call i64 @llvm.ppc.divde(i64 [[TMP0]], i64 [[TMP1]])
+// LE64-NEXT:    ret i64 [[TMP2]]
+//
+long long testdivde(long long dividend, long long divisor) {
+  return __divde(dividend, divisor);
+}
+
+// BE64-LABEL: @testdivdeu(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i64, align 8
+// BE64-NEXT:    store i64 [[DIVIDEND:%.*]], i64* [[DIVIDEND_ADDR]], align 8
+// BE64-NEXT:    store i64 [[DIVISOR:%.*]], i64* [[DIVISOR_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DIVIDEND_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DIVISOR_ADDR]], align 8
+// BE64-NEXT:    [[TMP2:%.*]] = call i64 @llvm.ppc.divdeu(i64 [[TMP0]], i64 [[TMP1]])
+// BE64-NEXT:    ret i64 [[TMP2]]
+//
+// LE64-LABEL: @testdivdeu(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i64, align 8
+// LE64-NEXT:    store i64 [[DIVIDEND:%.*]], i64* [[DIVIDEND_ADDR]], align 8
+// LE64-NEXT:    store i64 [[DIVISOR:%.*]], i64* [[DIVISOR_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DIVIDEND_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DIVISOR_ADDR]], align 8
+// LE64-NEXT:    [[TMP2:%.*]] = call i64 @llvm.ppc.divdeu(i64 [[TMP0]], i64 [[TMP1]])
+// LE64-NEXT:    ret i64 [[TMP2]]
+//
+unsigned long long testdivdeu(unsigned long long dividend, unsigned long long divisor) {
+  return __divdeu(dividend, divisor);
+}
+#endif
+
+// BE64-LABEL: @testdivwe(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    store i32 [[DIVIDEND:%.*]], i32* [[DIVIDEND_ADDR]], align 4
+// BE64-NEXT:    store i32 [[DIVISOR:%.*]], i32* [[DIVISOR_ADDR]], align 4
+// BE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DIVIDEND_ADDR]], align 4
+// BE64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DIVISOR_ADDR]], align 4
+// BE64-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ppc.divwe(i32 [[TMP0]], i32 [[TMP1]])
+// BE64-NEXT:    ret i32 [[TMP2]]
+//
+// LE64-LABEL: @testdivwe(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    store i32 [[DIVIDEND:%.*]], i32* [[DIVIDEND_ADDR]], align 4
+// LE64-NEXT:    store i32 [[DIVISOR:%.*]], i32* [[DIVISOR_ADDR]], align 4
+// LE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DIVIDEND_ADDR]], align 4
+// LE64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DIVISOR_ADDR]], align 4
+// LE64-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ppc.divwe(i32 [[TMP0]], i32 [[TMP1]])
+// LE64-NEXT:    ret i32 [[TMP2]]
+//
+// BE32-LABEL: @testdivwe(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i32 [[DIVIDEND:%.*]], i32* [[DIVIDEND_ADDR]], align 4
+// BE32-NEXT:    store i32 [[DIVISOR:%.*]], i32* [[DIVISOR_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DIVIDEND_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DIVISOR_ADDR]], align 4
+// BE32-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ppc.divwe(i32 [[TMP0]], i32 [[TMP1]])
+// BE32-NEXT:    ret i32 [[TMP2]]
+//
+// LE32-LABEL: @testdivwe(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i32 [[DIVIDEND:%.*]], i32* [[DIVIDEND_ADDR]], align 4
+// LE32-NEXT:    store i32 [[DIVISOR:%.*]], i32* [[DIVISOR_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DIVIDEND_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DIVISOR_ADDR]], align 4
+// LE32-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ppc.divwe(i32 [[TMP0]], i32 [[TMP1]])
+// LE32-NEXT:    ret i32 [[TMP2]]
+//
+int testdivwe(int dividend, int divisor) {
+  return __divwe(dividend, divisor);
+}
+
+// BE64-LABEL: @testdivweu(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i32, align 4
+// BE64-NEXT:    store i32 [[DIVIDEND:%.*]], i32* [[DIVIDEND_ADDR]], align 4
+// BE64-NEXT:    store i32 [[DIVISOR:%.*]], i32* [[DIVISOR_ADDR]], align 4
+// BE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DIVIDEND_ADDR]], align 4
+// BE64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DIVISOR_ADDR]], align 4
+// BE64-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ppc.divweu(i32 [[TMP0]], i32 [[TMP1]])
+// BE64-NEXT:    ret i32 [[TMP2]]
+//
+// LE64-LABEL: @testdivweu(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i32, align 4
+// LE64-NEXT:    store i32 [[DIVIDEND:%.*]], i32* [[DIVIDEND_ADDR]], align 4
+// LE64-NEXT:    store i32 [[DIVISOR:%.*]], i32* [[DIVISOR_ADDR]], align 4
+// LE64-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DIVIDEND_ADDR]], align 4
+// LE64-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DIVISOR_ADDR]], align 4
+// LE64-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ppc.divweu(i32 [[TMP0]], i32 [[TMP1]])
+// LE64-NEXT:    ret i32 [[TMP2]]
+//
+// BE32-LABEL: @testdivweu(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i32, align 4
+// BE32-NEXT:    store i32 [[DIVIDEND:%.*]], i32* [[DIVIDEND_ADDR]], align 4
+// BE32-NEXT:    store i32 [[DIVISOR:%.*]], i32* [[DIVISOR_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DIVIDEND_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DIVISOR_ADDR]], align 4
+// BE32-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ppc.divweu(i32 [[TMP0]], i32 [[TMP1]])
+// BE32-NEXT:    ret i32 [[TMP2]]
+//
+// LE32-LABEL: @testdivweu(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[DIVIDEND_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    [[DIVISOR_ADDR:%.*]] = alloca i32, align 4
+// LE32-NEXT:    store i32 [[DIVIDEND:%.*]], i32* [[DIVIDEND_ADDR]], align 4
+// LE32-NEXT:    store i32 [[DIVISOR:%.*]], i32* [[DIVISOR_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DIVIDEND_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DIVISOR_ADDR]], align 4
+// LE32-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ppc.divweu(i32 [[TMP0]], i32 [[TMP1]])
+// LE32-NEXT:    ret i32 [[TMP2]]
+//
+unsigned int testdivweu(unsigned int dividend, unsigned int divisor) {
+  return __divweu(dividend, divisor);
+}
Index: clang/test/CodeGen/builtins-ppc-xlcompat-darn.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-darn.c
@@ -0,0 +1,43 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -D__ppc64__ \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr9 | FileCheck %s --check-prefix=64BIT
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -D__ppc64__ \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr9 | FileCheck %s --check-prefix=64BIT
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr9 | FileCheck %s --check-prefix=32BIT
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr9 | FileCheck %s --check-prefix=32BIT
+
+#ifdef __ppc64__
+// 64BIT-LABEL: @testdarn(
+// 64BIT-NEXT:  entry:
+// 64BIT-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ppc.darn()
+// 64BIT-NEXT:    ret i64 [[TMP0]]
+//
+long long testdarn(void) {
+  return __darn();
+}
+
+// 64BIT-LABEL: @testdarn_raw(
+// 64BIT-NEXT:  entry:
+// 64BIT-NEXT:    [[TMP0:%.*]] = call i64 @llvm.ppc.darnraw()
+// 64BIT-NEXT:    ret i64 [[TMP0]]
+//
+long long testdarn_raw(void) {
+  return __darn_raw();
+}
+#endif
+
+// 64BIT-LABEL: @testdarn_32(
+// 64BIT-NEXT:  entry:
+// 64BIT-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ppc.darn32()
+// 64BIT-NEXT:    ret i32 [[TMP0]]
+//
+// 32BIT-LABEL: @testdarn_32(
+// 32BIT-NEXT:  entry:
+// 32BIT-NEXT:    [[TMP0:%.*]] = call i32 @llvm.ppc.darn32()
+// 32BIT-NEXT:    ret i32 [[TMP0]]
+//
+int testdarn_32(void) {
+  return __darn_32();
+}
Index: clang/test/CodeGen/builtins-ppc-xlcompat-cmplx.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-cmplx.c
@@ -0,0 +1,159 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -D__ppc64__ \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE64
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -D__ppc64__ \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE64
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=BE32
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=LE32
+
+// BE64-LABEL: @testcmplx(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
+// BE64-NEXT:    [[REAL_ADDR:%.*]] = alloca double, align 8
+// BE64-NEXT:    [[IMAG_ADDR:%.*]] = alloca double, align 8
+// BE64-NEXT:    store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8
+// BE64-NEXT:    store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8
+// BE64-NEXT:    [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8
+// BE64-NEXT:    [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8
+// BE64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 0
+// BE64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 1
+// BE64-NEXT:    store double [[TMP0]], double* [[RETVAL_REALP]], align 8
+// BE64-NEXT:    store double [[TMP1]], double* [[RETVAL_IMAGP]], align 8
+// BE64-NEXT:    [[TMP2:%.*]] = load { double, double }, { double, double }* [[RETVAL]], align 8
+// BE64-NEXT:    ret { double, double } [[TMP2]]
+//
+// LE64-LABEL: @testcmplx(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
+// LE64-NEXT:    [[REAL_ADDR:%.*]] = alloca double, align 8
+// LE64-NEXT:    [[IMAG_ADDR:%.*]] = alloca double, align 8
+// LE64-NEXT:    store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8
+// LE64-NEXT:    store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8
+// LE64-NEXT:    [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8
+// LE64-NEXT:    [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8
+// LE64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 0
+// LE64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 1
+// LE64-NEXT:    store double [[TMP0]], double* [[RETVAL_REALP]], align 8
+// LE64-NEXT:    store double [[TMP1]], double* [[RETVAL_IMAGP]], align 8
+// LE64-NEXT:    [[TMP2:%.*]] = load { double, double }, { double, double }* [[RETVAL]], align 8
+// LE64-NEXT:    ret { double, double } [[TMP2]]
+//
+// BE32-LABEL: @testcmplx(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[REAL_ADDR:%.*]] = alloca double, align 8
+// BE32-NEXT:    [[IMAG_ADDR:%.*]] = alloca double, align 8
+// BE32-NEXT:    store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8
+// BE32-NEXT:    store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8
+// BE32-NEXT:    [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8
+// BE32-NEXT:    [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8
+// BE32-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT:%.*]], i32 0, i32 0
+// BE32-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1
+// BE32-NEXT:    store double [[TMP0]], double* [[AGG_RESULT_REALP]], align 8
+// BE32-NEXT:    store double [[TMP1]], double* [[AGG_RESULT_IMAGP]], align 8
+// BE32-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 0
+// BE32-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, double* [[AGG_RESULT_REALP1]], align 8
+// BE32-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1
+// BE32-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, double* [[AGG_RESULT_IMAGP2]], align 8
+// BE32-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 0
+// BE32-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1
+// BE32-NEXT:    store double [[AGG_RESULT_REAL]], double* [[AGG_RESULT_REALP3]], align 8
+// BE32-NEXT:    store double [[AGG_RESULT_IMAG]], double* [[AGG_RESULT_IMAGP4]], align 8
+// BE32-NEXT:    ret void
+//
+// LE32-LABEL: @testcmplx(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[REAL_ADDR:%.*]] = alloca double, align 8
+// LE32-NEXT:    [[IMAG_ADDR:%.*]] = alloca double, align 8
+// LE32-NEXT:    store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8
+// LE32-NEXT:    store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8
+// LE32-NEXT:    [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8
+// LE32-NEXT:    [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8
+// LE32-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT:%.*]], i32 0, i32 0
+// LE32-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1
+// LE32-NEXT:    store double [[TMP0]], double* [[AGG_RESULT_REALP]], align 8
+// LE32-NEXT:    store double [[TMP1]], double* [[AGG_RESULT_IMAGP]], align 8
+// LE32-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 0
+// LE32-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, double* [[AGG_RESULT_REALP1]], align 8
+// LE32-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1
+// LE32-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, double* [[AGG_RESULT_IMAGP2]], align 8
+// LE32-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 0
+// LE32-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1
+// LE32-NEXT:    store double [[AGG_RESULT_REAL]], double* [[AGG_RESULT_REALP3]], align 8
+// LE32-NEXT:    store double [[AGG_RESULT_IMAG]], double* [[AGG_RESULT_IMAGP4]], align 8
+// LE32-NEXT:    ret void
+//
+double _Complex testcmplx(double real, double imag) {
+  return __cmplx(real, imag);
+}
+
+// BE64-LABEL: @testcmplxf(
+// BE64-NEXT:  entry:
+// BE64-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
+// BE64-NEXT:    [[REAL_ADDR:%.*]] = alloca float, align 4
+// BE64-NEXT:    [[IMAG_ADDR:%.*]] = alloca float, align 4
+// BE64-NEXT:    store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4
+// BE64-NEXT:    store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4
+// BE64-NEXT:    [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4
+// BE64-NEXT:    [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4
+// BE64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0
+// BE64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1
+// BE64-NEXT:    store float [[TMP0]], float* [[RETVAL_REALP]], align 4
+// BE64-NEXT:    store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4
+// BE64-NEXT:    [[TMP2:%.*]] = load { float, float }, { float, float }* [[RETVAL]], align 4
+// BE64-NEXT:    ret { float, float } [[TMP2]]
+//
+// LE64-LABEL: @testcmplxf(
+// LE64-NEXT:  entry:
+// LE64-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
+// LE64-NEXT:    [[REAL_ADDR:%.*]] = alloca float, align 4
+// LE64-NEXT:    [[IMAG_ADDR:%.*]] = alloca float, align 4
+// LE64-NEXT:    store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4
+// LE64-NEXT:    store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4
+// LE64-NEXT:    [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4
+// LE64-NEXT:    [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4
+// LE64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0
+// LE64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1
+// LE64-NEXT:    store float [[TMP0]], float* [[RETVAL_REALP]], align 4
+// LE64-NEXT:    store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4
+// LE64-NEXT:    [[TMP2:%.*]] = load { float, float }, { float, float }* [[RETVAL]], align 4
+// LE64-NEXT:    ret { float, float } [[TMP2]]
+//
+// BE32-LABEL: @testcmplxf(
+// BE32-NEXT:  entry:
+// BE32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
+// BE32-NEXT:    [[REAL_ADDR:%.*]] = alloca float, align 4
+// BE32-NEXT:    [[IMAG_ADDR:%.*]] = alloca float, align 4
+// BE32-NEXT:    store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4
+// BE32-NEXT:    store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4
+// BE32-NEXT:    [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4
+// BE32-NEXT:    [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4
+// BE32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0
+// BE32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1
+// BE32-NEXT:    store float [[TMP0]], float* [[RETVAL_REALP]], align 4
+// BE32-NEXT:    store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4
+// BE32-NEXT:    [[TMP2:%.*]] = bitcast { float, float }* [[RETVAL]] to i64*
+// BE32-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 4
+// BE32-NEXT:    ret i64 [[TMP3]]
+//
+// LE32-LABEL: @testcmplxf(
+// LE32-NEXT:  entry:
+// LE32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
+// LE32-NEXT:    [[REAL_ADDR:%.*]] = alloca float, align 4
+// LE32-NEXT:    [[IMAG_ADDR:%.*]] = alloca float, align 4
+// LE32-NEXT:    store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4
+// LE32-NEXT:    store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4
+// LE32-NEXT:    [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4
+// LE32-NEXT:    [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4
+// LE32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0
+// LE32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1
+// LE32-NEXT:    store float [[TMP0]], float* [[RETVAL_REALP]], align 4
+// LE32-NEXT:    store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4
+// LE32-NEXT:    [[TMP2:%.*]] = bitcast { float, float }* [[RETVAL]] to i64*
+// LE32-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 4
+// LE32-NEXT:    ret i64 [[TMP3]]
+//
+float _Complex testcmplxf(float real, float imag) {
+  return __cmplxf(real, imag);
+}
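
For reference, both __cmplx and __cmplxf map to __builtin_complex (see the PPC.cpp hunk below), which assembles a complex value directly from its real and imaginary parts with no floating-point arithmetic. A minimal usage sketch, not part of the patch; __builtin_inf is used purely for illustration:

  double _Complex make_imag_inf(void) {
    // Illustrative only. No multiply or add happens here, so, unlike
    // 0.0 + __builtin_inf() * I, this cannot turn the infinity into a NaN.
    return __cmplx(0.0, __builtin_inf());
  }
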
Index: clang/test/CodeGen/builtins-ppc-xlcompat-cipher.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-cipher.c
@@ -0,0 +1,159 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 -D__ppc64__ | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 -D__ppc64__ | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+
+// CHECK-LABEL: @testvcipher(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[STATE_ARRAY_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    [[ROUND_KEY_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    store <16 x i8> [[STATE_ARRAY:%.*]], <16 x i8>* [[STATE_ARRAY_ADDR]], align 16
+// CHECK-NEXT:    store <16 x i8> [[ROUND_KEY:%.*]], <16 x i8>* [[ROUND_KEY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, <16 x i8>* [[STATE_ARRAY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* [[ROUND_KEY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
+// CHECK-NEXT:    [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64> [[TMP1]], <2 x i64> [[TMP3]])
+// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
+// CHECK-NEXT:    ret <16 x i8> [[TMP5]]
+//
+vector unsigned char testvcipher(vector unsigned char state_array, vector unsigned char round_key) {
+  return __vcipher(state_array, round_key);
+}
+
+// CHECK-LABEL: @testvcipherlast(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[STATE_ARRAY_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    [[ROUND_KEY_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    store <16 x i8> [[STATE_ARRAY:%.*]], <16 x i8>* [[STATE_ARRAY_ADDR]], align 16
+// CHECK-NEXT:    store <16 x i8> [[ROUND_KEY:%.*]], <16 x i8>* [[ROUND_KEY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, <16 x i8>* [[STATE_ARRAY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* [[ROUND_KEY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
+// CHECK-NEXT:    [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64> [[TMP1]], <2 x i64> [[TMP3]])
+// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
+// CHECK-NEXT:    ret <16 x i8> [[TMP5]]
+//
+vector unsigned char testvcipherlast(vector unsigned char state_array, vector unsigned char round_key) {
+  return __vcipherlast(state_array, round_key);
+}
+
+// CHECK-LABEL: @testvncipher(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[STATE_ARRAY_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    [[ROUND_KEY_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    store <16 x i8> [[STATE_ARRAY:%.*]], <16 x i8>* [[STATE_ARRAY_ADDR]], align 16
+// CHECK-NEXT:    store <16 x i8> [[ROUND_KEY:%.*]], <16 x i8>* [[ROUND_KEY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, <16 x i8>* [[STATE_ARRAY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* [[ROUND_KEY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
+// CHECK-NEXT:    [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64> [[TMP1]], <2 x i64> [[TMP3]])
+// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
+// CHECK-NEXT:    ret <16 x i8> [[TMP5]]
+//
+vector unsigned char testvncipher(vector unsigned char state_array, vector unsigned char round_key) {
+  return __vncipher(state_array, round_key);
+}
+
+// CHECK-LABEL: @testvncipherlast(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[STATE_ARRAY_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    [[ROUND_KEY_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    store <16 x i8> [[STATE_ARRAY:%.*]], <16 x i8>* [[STATE_ARRAY_ADDR]], align 16
+// CHECK-NEXT:    store <16 x i8> [[ROUND_KEY:%.*]], <16 x i8>* [[ROUND_KEY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, <16 x i8>* [[STATE_ARRAY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* [[ROUND_KEY_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x i64>
+// CHECK-NEXT:    [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64> [[TMP1]], <2 x i64> [[TMP3]])
+// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
+// CHECK-NEXT:    ret <16 x i8> [[TMP5]]
+//
+vector unsigned char testvncipherlast(vector unsigned char state_array, vector unsigned char round_key) {
+  return __vncipherlast(state_array, round_key);
+}
+
+// CHECK-LABEL: @testvpermxor(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    [[MASK_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    store <16 x i8> [[A:%.*]], <16 x i8>* [[A_ADDR]], align 16
+// CHECK-NEXT:    store <16 x i8> [[B:%.*]], <16 x i8>* [[B_ADDR]], align 16
+// CHECK-NEXT:    store <16 x i8> [[MASK:%.*]], <16 x i8>* [[MASK_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, <16 x i8>* [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* [[MASK_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[TMP2]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP3]]
+//
+vector unsigned char testvpermxor(vector unsigned char a, vector unsigned char b, vector unsigned char mask) {
+  return __vpermxor(a, b, mask);
+}
+
+// CHECK-LABEL: @testvpmsumb(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <16 x i8>, align 16
+// CHECK-NEXT:    store <16 x i8> [[A:%.*]], <16 x i8>* [[A_ADDR]], align 16
+// CHECK-NEXT:    store <16 x i8> [[B:%.*]], <16 x i8>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, <16 x i8>* [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> [[TMP0]], <16 x i8> [[TMP1]])
+// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
+//
+vector unsigned char testvpmsumb(vector unsigned char a, vector unsigned char b) {
+  return __vpmsumb(a, b);
+}
+
+// CHECK-LABEL: @testvpmsumd(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <2 x i64>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <2 x i64>, align 16
+// CHECK-NEXT:    store <2 x i64> [[A:%.*]], <2 x i64>* [[A_ADDR]], align 16
+// CHECK-NEXT:    store <2 x i64> [[B:%.*]], <2 x i64>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> [[TMP0]], <2 x i64> [[TMP1]])
+// CHECK-NEXT:    ret <2 x i64> [[TMP2]]
+//
+vector unsigned long long testvpmsumd(vector unsigned long long a, vector unsigned long long b) {
+  return __vpmsumd(a, b);
+}
+
+// CHECK-LABEL: @testvpmsumh(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <8 x i16>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <8 x i16>, align 16
+// CHECK-NEXT:    store <8 x i16> [[A:%.*]], <8 x i16>* [[A_ADDR]], align 16
+// CHECK-NEXT:    store <8 x i16> [[B:%.*]], <8 x i16>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, <8 x i16>* [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+//
+vector unsigned short testvpmsumh(vector unsigned short a, vector unsigned short b) {
+  return __vpmsumh(a, b);
+}
+
+// CHECK-LABEL: @testvpmsumw(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca <4 x i32>, align 16
+// CHECK-NEXT:    store <4 x i32> [[A:%.*]], <4 x i32>* [[A_ADDR]], align 16
+// CHECK-NEXT:    store <4 x i32> [[B:%.*]], <4 x i32>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[B_ADDR]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+vector unsigned int testvpmsumw(vector unsigned int a, vector unsigned int b) {
+  return __vpmsumw(a, b);
+}
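
These builtins correspond one-to-one to the POWER8 in-core crypto instructions. As a hedged sketch of how the AES ones are meant to be chained (the helper name and key-schedule array are hypothetical, not part of the patch): one __vcipher per middle round, then __vcipherlast for the final round, which skips MixColumns:

  // Illustrative helper, not part of this patch. round_keys[0..10] is
  // assumed to hold a pre-expanded AES-128 key schedule.
  vector unsigned char aes128_encrypt(vector unsigned char block,
                                      const vector unsigned char *round_keys) {
    block ^= round_keys[0];                      // initial AddRoundKey
    for (int i = 1; i < 10; ++i)
      block = __vcipher(block, round_keys[i]);   // one full AES round
    return __vcipherlast(block, round_keys[10]); // final round, no MixColumns
  }
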
Index: clang/test/CodeGen/builtins-ppc-xlcompat-bpermd.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-bpermd.c
@@ -0,0 +1,21 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s
+
+// Note that bpermd is 64-bit only, so only the 64-bit triples are tested.
+// CHECK-LABEL: @testbpermd(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[BIT_SELECTOR_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    [[SOURCE_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    store i64 [[BIT_SELECTOR:%.*]], i64* [[BIT_SELECTOR_ADDR]], align 8
+// CHECK-NEXT:    store i64 [[SOURCE:%.*]], i64* [[SOURCE_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* [[BIT_SELECTOR_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* [[SOURCE_ADDR]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.ppc.bpermd(i64 [[TMP0]], i64 [[TMP1]])
+// CHECK-NEXT:    ret i64 [[TMP2]]
+//
+long long testbpermd(long long bit_selector, long long source) {
+  return __bpermd(bit_selector, source);
+}
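
Since the semantics are not obvious from the IR, here is a portable sketch of what bpermd computes, per my reading of the ISA (IBM bit numbering, bit 0 being the most significant bit of the doubleword); the reference function is illustrative only:

  // Illustrative reference model, not part of this patch.
  unsigned long long bpermd_ref(unsigned long long bit_selector,
                                unsigned long long source) {
    unsigned long long result = 0;
    for (int i = 0; i < 8; ++i) {
      unsigned idx = (bit_selector >> (56 - 8 * i)) & 0xFF; // byte i, MSB first
      // Indices >= 64 select a zero bit.
      unsigned bit = idx < 64 ? (unsigned)(source >> (63 - idx)) & 1 : 0;
      result = (result << 1) | bit; // gathered bits form the low byte
    }
    return result; // upper 56 bits are always zero
  }
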
Index: clang/test/CodeGen/builtins-ppc-xlcompat-alloca.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-alloca.c
@@ -0,0 +1,33 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -D__ppc64__ \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=64BIT
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -D__ppc64__ \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=64BIT
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=32BIT
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=32BIT
+
+// This test deliberately includes no headers, to avoid any dependency on the
+// system. size_t is required below; it is normally defined in stddef.h.
+typedef __SIZE_TYPE__ size_t;
+
+// 64BIT-LABEL: @testalloca(
+// 64BIT-NEXT:  entry:
+// 64BIT-NEXT:    [[SIZE_ADDR:%.*]] = alloca i64, align 8
+// 64BIT-NEXT:    store i64 [[SIZE:%.*]], i64* [[SIZE_ADDR]], align 8
+// 64BIT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[SIZE_ADDR]], align 8
+// 64BIT-NEXT:    [[TMP1:%.*]] = alloca i8, i64 [[TMP0]], align 16
+// 64BIT-NEXT:    ret i8* [[TMP1]]
+//
+// 32BIT-LABEL: @testalloca(
+// 32BIT-NEXT:  entry:
+// 32BIT-NEXT:    [[SIZE_ADDR:%.*]] = alloca i32, align 4
+// 32BIT-NEXT:    store i32 [[SIZE:%.*]], i32* [[SIZE_ADDR]], align 4
+// 32BIT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
+// 32BIT-NEXT:    [[TMP1:%.*]] = alloca i8, i32 [[TMP0]], align 16
+// 32BIT-NEXT:    ret i8* [[TMP1]]
+//
+void *testalloca(size_t size) {
+  return __alloca(size);
+}
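
__alloca maps directly to __builtin_alloca, so the storage lives in the current stack frame and is released when the function returns. A minimal standalone usage sketch (it pulls in stddef.h, which the test above deliberately avoids):

  #include <stddef.h>

  void zero_scratch(size_t n) {
    char *buf = __alloca(n); // stack allocation, freed automatically on return
    for (size_t i = 0; i < n; ++i)
      buf[i] = 0;
  }
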
Index: clang/test/CodeGen/builtins-ppc-xlcompat-abs.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/builtins-ppc-xlcompat-abs.c
@@ -0,0 +1,57 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=64BIT
+// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=64BIT
+// RUN: %clang_cc1 -triple powerpc-unknown-unknown \
+// RUN:    -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=32BIT
+// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \
+// RUN:   -emit-llvm %s -o -  -target-cpu pwr8 | FileCheck %s --check-prefix=32BIT
+
+// 64BIT-LABEL: @testlabs(
+// 64BIT-NEXT:  entry:
+// 64BIT-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// 64BIT-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// 64BIT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// 64BIT-NEXT:    [[NEG:%.*]] = sub nsw i64 0, [[TMP0]]
+// 64BIT-NEXT:    [[ABSCOND:%.*]] = icmp slt i64 [[TMP0]], 0
+// 64BIT-NEXT:    [[ABS:%.*]] = select i1 [[ABSCOND]], i64 [[NEG]], i64 [[TMP0]]
+// 64BIT-NEXT:    ret i64 [[ABS]]
+//
+// 32BIT-LABEL: @testlabs(
+// 32BIT-NEXT:  entry:
+// 32BIT-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
+// 32BIT-NEXT:    store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// 32BIT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// 32BIT-NEXT:    [[NEG:%.*]] = sub nsw i32 0, [[TMP0]]
+// 32BIT-NEXT:    [[ABSCOND:%.*]] = icmp slt i32 [[TMP0]], 0
+// 32BIT-NEXT:    [[ABS:%.*]] = select i1 [[ABSCOND]], i32 [[NEG]], i32 [[TMP0]]
+// 32BIT-NEXT:    ret i32 [[ABS]]
+//
+signed long testlabs(signed long a) {
+  return __labs(a);
+}
+
+// 64BIT-LABEL: @testllabs(
+// 64BIT-NEXT:  entry:
+// 64BIT-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// 64BIT-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// 64BIT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// 64BIT-NEXT:    [[NEG:%.*]] = sub nsw i64 0, [[TMP0]]
+// 64BIT-NEXT:    [[ABSCOND:%.*]] = icmp slt i64 [[TMP0]], 0
+// 64BIT-NEXT:    [[ABS:%.*]] = select i1 [[ABSCOND]], i64 [[NEG]], i64 [[TMP0]]
+// 64BIT-NEXT:    ret i64 [[ABS]]
+//
+// 32BIT-LABEL: @testllabs(
+// 32BIT-NEXT:  entry:
+// 32BIT-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// 32BIT-NEXT:    store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// 32BIT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// 32BIT-NEXT:    [[NEG:%.*]] = sub nsw i64 0, [[TMP0]]
+// 32BIT-NEXT:    [[ABSCOND:%.*]] = icmp slt i64 [[TMP0]], 0
+// 32BIT-NEXT:    [[ABS:%.*]] = select i1 [[ABSCOND]], i64 [[NEG]], i64 [[TMP0]]
+// 32BIT-NEXT:    ret i64 [[ABS]]
+//
+signed long long testllabs(signed long long a) {
+  return __llabs(a);
+}
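
The 32BIT checks for testlabs use i32 rather than i64 because long is only 32 bits wide on the ILP32 PowerPC triples; testllabs stays i64 under both prefixes since long long is 64 bits on all four targets.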
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -15130,7 +15130,32 @@
     llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
     return Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
   }
+  case PPC::BI__builtin_ppc_alignx: {
+    const Expr *Ptr = E->getArg(1);
+    Value *PtrValue = EmitScalarExpr(Ptr);
+    Value *AlignmentValue = EmitScalarExpr(E->getArg(0));
+    ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
+    if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
+      AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
+                                     llvm::Value::MaximumAlignment);
+
+    emitAlignmentAssumption(PtrValue, Ptr,
+                            /*The expr loc is sufficient.*/ SourceLocation(),
+                            AlignmentCI, nullptr);
+    return PtrValue;
+  }
+  case PPC::BI__builtin_ppc_rdlam: {
+    Value *Src = EmitScalarExpr(E->getArg(0));
+    Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
+
+    llvm::Type *Ty = Src->getType();
+    ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
 
+    Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
+    Value *Rotate = Builder.CreateCall(F, {Src, Src, ShiftAmt});
+    Value *Mask = EmitScalarExpr(E->getArg(2));
+    return Builder.CreateAnd(Rotate, Mask);
+  }
   // FMA variations
   case PPC::BI__builtin_vsx_xvmaddadp:
   case PPC::BI__builtin_vsx_xvmaddasp:
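
In the hunk above, __builtin_ppc_alignx clamps the requested alignment to llvm::Value::MaximumAlignment before emitting the alignment assumption, and __builtin_ppc_rdlam lowers to llvm.fshl — a funnel shift with both data operands equal, i.e. a left rotate — followed by an and with the mask. A C-level sketch of the rdlam computation (64-bit case; the function name is illustrative, not part of the patch):

  unsigned long long rdlam_ref(unsigned long long src, unsigned long long shift,
                               unsigned long long mask) {
    unsigned s = (unsigned)(shift & 63); // fshl rotates by the amount modulo 64
    unsigned long long rotated = s ? (src << s) | (src >> (64 - s)) : src;
    return rotated & mask;               // mask off the unwanted bits
  }
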
Index: clang/lib/Basic/Targets/PPC.cpp
===================================================================
--- clang/lib/Basic/Targets/PPC.cpp
+++ clang/lib/Basic/Targets/PPC.cpp
@@ -97,6 +97,46 @@
   Builder.defineMacro("__dcbtst", "__builtin_ppc_dcbtst");
   Builder.defineMacro("__dcbz", "__builtin_ppc_dcbz");
   Builder.defineMacro("__icbt", "__builtin_ppc_icbt");
+  Builder.defineMacro("__alloca", "__builtin_alloca");
+  Builder.defineMacro("__vcipher", "__builtin_altivec_crypto_vcipher");
+  Builder.defineMacro("__vcipherlast", "__builtin_altivec_crypto_vcipherlast");
+  Builder.defineMacro("__vncipher", "__builtin_altivec_crypto_vncipher");
+  Builder.defineMacro("__vncipherlast",
+                      "__builtin_altivec_crypto_vncipherlast");
+  Builder.defineMacro("__vpermxor", "__builtin_altivec_crypto_vpermxor");
+  Builder.defineMacro("__vpmsumb", "__builtin_altivec_crypto_vpmsumb");
+  Builder.defineMacro("__vpmsumd", "__builtin_altivec_crypto_vpmsumd");
+  Builder.defineMacro("__vpmsumh", "__builtin_altivec_crypto_vpmsumh");
+  Builder.defineMacro("__vpmsumw", "__builtin_altivec_crypto_vpmsumw");
+  Builder.defineMacro("__divde", "__builtin_divde");
+  Builder.defineMacro("__divwe", "__builtin_divwe");
+  Builder.defineMacro("__divdeu", "__builtin_divdeu");
+  Builder.defineMacro("__divweu", "__builtin_divweu");
+  Builder.defineMacro("__alignx", "__builtin_ppc_alignx");
+  Builder.defineMacro("__bcopy", "bcopy");
+  Builder.defineMacro("__bpermd", "__builtin_bpermd");
+  Builder.defineMacro("__cntlz4", "__builtin_clz");
+  Builder.defineMacro("__cntlz8", "__builtin_clzll");
+  Builder.defineMacro("__cmplx", "__builtin_complex");
+  Builder.defineMacro("__cmplxf", "__builtin_complex");
+  Builder.defineMacro("__cnttz4", "__builtin_ctz");
+  Builder.defineMacro("__cnttz8", "__builtin_ctzll");
+  Builder.defineMacro("__darn", "__builtin_darn");
+  Builder.defineMacro("__darn_32", "__builtin_darn_32");
+  Builder.defineMacro("__darn_raw", "__builtin_darn_raw");
+  Builder.defineMacro("__dcbf", "__builtin_dcbf");
+  Builder.defineMacro("__fmadd", "__builtin_fma");
+  Builder.defineMacro("__fmadds", "__builtin_fmaf");
+  Builder.defineMacro("__labs", "__builtin_labs");
+  Builder.defineMacro("__llabs", "__builtin_llabs");
+  Builder.defineMacro("__popcnt4", "__builtin_popcount");
+  Builder.defineMacro("__popcnt8", "__builtin_popcountll");
+  Builder.defineMacro("__readflm", "__builtin_readflm");
+  Builder.defineMacro("__rotatel4", "__builtin_rotateleft32");
+  Builder.defineMacro("__rotatel8", "__builtin_rotateleft64");
+  Builder.defineMacro("__rdlam", "__builtin_ppc_rdlam");
+  Builder.defineMacro("__setflm", "__builtin_setflm");
+  Builder.defineMacro("__setrnd", "__builtin_setrnd");
 }
 
 /// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific
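
Because these are ordinary predefined macros, the XL spellings are rewritten textually onto the existing builtins before Sema ever sees them; after these defines, a PowerPC TU compiles __rotatel4(x, s) as __builtin_rotateleft32(x, s), __fmadds(a, b, c) as __builtin_fmaf(a, b, c), and __bcopy(src, dst, n) as a call to plain bcopy.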
Index: clang/include/clang/Basic/BuiltinsPPC.def
===================================================================
--- clang/include/clang/Basic/BuiltinsPPC.def
+++ clang/include/clang/Basic/BuiltinsPPC.def
@@ -45,6 +45,8 @@
 BUILTIN(__builtin_ppc_dcbtst, "vv*", "")
 BUILTIN(__builtin_ppc_dcbz, "vv*", "")
 BUILTIN(__builtin_ppc_icbt, "vv*", "")
+BUILTIN(__builtin_ppc_alignx, "viCvC*", "nc")
+BUILTIN(__builtin_ppc_rdlam, "UWiUWiUWiCUWi", "nc")
 
 BUILTIN(__builtin_ppc_get_timebase, "ULLi", "n")
 