erik.pilkington created this revision.
erik.pilkington added reviewers: rsmith, jfb, rjmccall.
Herald added subscribers: llvm-commits, kristina, arphaman, dexonsmith, 
jkorous, hiraditya.
Herald added projects: clang, LLVM.

http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0476r2.html

This adds a new (pseudo) builtin, `__builtin_bit_cast(T, v)`, which performs a 
bit_cast of the value `v` to the type `T`. The expression can be evaluated at 
compile time under the circumstances described in the paper. Compile-time 
evaluation doesn't support bit-fields yet; I'm planning on fixing that in a 
follow-up (some of the logic needed for it currently lives in CodeGen).
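
For anyone who wants a quick picture of the intended use: the builtin is meant 
to back a constexpr `std::bit_cast`. A minimal sketch (the names below are 
illustrative, not part of this patch):

```
// Hypothetical library-style wrapper over the builtin.
template <class To, class From>
constexpr To my_bit_cast(const From &from) {
  static_assert(sizeof(To) == sizeof(From), "bit_cast requires equal sizes");
  return __builtin_bit_cast(To, from);
}

// The result is usable in constant expressions, e.g. inspecting a float's bits:
static_assert(my_bit_cast<unsigned>(1.0f) == 0x3f800000);
```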

rdar://44987528

Thanks for taking a look!


Repository:
  rC Clang

https://reviews.llvm.org/D62825

Files:
  clang/include/clang-c/Index.h
  clang/include/clang/AST/ExprCXX.h
  clang/include/clang/AST/OperationKinds.def
  clang/include/clang/AST/RecursiveASTVisitor.h
  clang/include/clang/Basic/DiagnosticASTKinds.td
  clang/include/clang/Basic/DiagnosticSemaKinds.td
  clang/include/clang/Basic/Features.def
  clang/include/clang/Basic/StmtNodes.td
  clang/include/clang/Basic/TokenKinds.def
  clang/include/clang/Parse/Parser.h
  clang/include/clang/Sema/Sema.h
  clang/lib/AST/Expr.cpp
  clang/lib/AST/ExprClassification.cpp
  clang/lib/AST/ExprConstant.cpp
  clang/lib/AST/ItaniumMangle.cpp
  clang/lib/AST/StmtPrinter.cpp
  clang/lib/AST/StmtProfile.cpp
  clang/lib/CodeGen/CGExpr.cpp
  clang/lib/CodeGen/CGExprAgg.cpp
  clang/lib/CodeGen/CGExprComplex.cpp
  clang/lib/CodeGen/CGExprConstant.cpp
  clang/lib/CodeGen/CGExprScalar.cpp
  clang/lib/Edit/RewriteObjCFoundationAPI.cpp
  clang/lib/Parse/ParseExpr.cpp
  clang/lib/Parse/ParseExprCXX.cpp
  clang/lib/Sema/SemaCast.cpp
  clang/lib/Sema/SemaExceptionSpec.cpp
  clang/lib/Sema/TreeTransform.h
  clang/lib/Serialization/ASTReaderStmt.cpp
  clang/lib/Serialization/ASTWriterStmt.cpp
  clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
  clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
  clang/test/CodeGenCXX/builtin-bit-cast.cpp
  clang/test/SemaCXX/builtin-bit-cast.cpp
  clang/test/SemaCXX/constexpr-builtin-bit-cast.cpp
  clang/tools/libclang/CIndex.cpp
  clang/tools/libclang/CXCursor.cpp
  llvm/include/llvm/ADT/APInt.h
  llvm/lib/ExecutionEngine/ExecutionEngine.cpp
  llvm/lib/Support/APInt.cpp

Index: llvm/lib/Support/APInt.cpp
===================================================================
--- llvm/lib/Support/APInt.cpp
+++ llvm/lib/Support/APInt.cpp
@@ -2929,3 +2929,56 @@
   LLVM_DEBUG(dbgs() << __func__ << ": solution (wrap): " << X << '\n');
   return X;
 }
+
+/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
+/// with the integer held in IntVal.
+void llvm::StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
+                            unsigned StoreBytes) {
+  assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
+  const uint8_t *Src = (const uint8_t *)IntVal.getRawData();
+
+  if (sys::IsLittleEndianHost) {
+    // Little-endian host - the source is ordered from LSB to MSB.  Order the
+    // destination from LSB to MSB: Do a straight copy.
+    memcpy(Dst, Src, StoreBytes);
+  } else {
+    // Big-endian host - the source is an array of 64 bit words ordered from
+    // LSW to MSW.  Each word is ordered from MSB to LSB.  Order the destination
+    // from MSB to LSB: Reverse the word order, but not the bytes in a word.
+    while (StoreBytes > sizeof(uint64_t)) {
+      StoreBytes -= sizeof(uint64_t);
+      // May not be aligned so use memcpy.
+      memcpy(Dst + StoreBytes, Src, sizeof(uint64_t));
+      Src += sizeof(uint64_t);
+    }
+
+    memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes);
+  }
+}
+
+/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
+/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
+void llvm::LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
+  assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!");
+  uint8_t *Dst = reinterpret_cast<uint8_t *>(
+                   const_cast<uint64_t *>(IntVal.getRawData()));
+
+  if (sys::IsLittleEndianHost)
+    // Little-endian host - the destination must be ordered from LSB to MSB.
+    // The source is ordered from LSB to MSB: Do a straight copy.
+    memcpy(Dst, Src, LoadBytes);
+  else {
+    // Big-endian - the destination is an array of 64 bit words ordered from
+    // LSW to MSW.  Each word must be ordered from MSB to LSB.  The source is
+    // ordered from MSB to LSB: Reverse the word order, but not the bytes in
+    // a word.
+    while (LoadBytes > sizeof(uint64_t)) {
+      LoadBytes -= sizeof(uint64_t);
+      // May not be aligned so use memcpy.
+      memcpy(Dst, Src + LoadBytes, sizeof(uint64_t));
+      Dst += sizeof(uint64_t);
+    }
+
+    memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes);
+  }
+}
Index: llvm/lib/ExecutionEngine/ExecutionEngine.cpp
===================================================================
--- llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -1019,32 +1019,6 @@
   return Result;
 }
 
-/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
-/// with the integer held in IntVal.
-static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
-                             unsigned StoreBytes) {
-  assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
-  const uint8_t *Src = (const uint8_t *)IntVal.getRawData();
-
-  if (sys::IsLittleEndianHost) {
-    // Little-endian host - the source is ordered from LSB to MSB.  Order the
-    // destination from LSB to MSB: Do a straight copy.
-    memcpy(Dst, Src, StoreBytes);
-  } else {
-    // Big-endian host - the source is an array of 64 bit words ordered from
-    // LSW to MSW.  Each word is ordered from MSB to LSB.  Order the destination
-    // from MSB to LSB: Reverse the word order, but not the bytes in a word.
-    while (StoreBytes > sizeof(uint64_t)) {
-      StoreBytes -= sizeof(uint64_t);
-      // May not be aligned so use memcpy.
-      memcpy(Dst + StoreBytes, Src, sizeof(uint64_t));
-      Src += sizeof(uint64_t);
-    }
-
-    memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes);
-  }
-}
-
 void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
                                          GenericValue *Ptr, Type *Ty) {
   const unsigned StoreBytes = getDataLayout().getTypeStoreSize(Ty);
@@ -1092,33 +1066,6 @@
     std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
 }
 
-/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
-/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
-static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
-  assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!");
-  uint8_t *Dst = reinterpret_cast<uint8_t *>(
-                   const_cast<uint64_t *>(IntVal.getRawData()));
-
-  if (sys::IsLittleEndianHost)
-    // Little-endian host - the destination must be ordered from LSB to MSB.
-    // The source is ordered from LSB to MSB: Do a straight copy.
-    memcpy(Dst, Src, LoadBytes);
-  else {
-    // Big-endian - the destination is an array of 64 bit words ordered from
-    // LSW to MSW.  Each word must be ordered from MSB to LSB.  The source is
-    // ordered from MSB to LSB: Reverse the word order, but not the bytes in
-    // a word.
-    while (LoadBytes > sizeof(uint64_t)) {
-      LoadBytes -= sizeof(uint64_t);
-      // May not be aligned so use memcpy.
-      memcpy(Dst, Src + LoadBytes, sizeof(uint64_t));
-      Dst += sizeof(uint64_t);
-    }
-
-    memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes);
-  }
-}
-
 /// FIXME: document
 ///
 void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
Index: llvm/include/llvm/ADT/APInt.h
===================================================================
--- llvm/include/llvm/ADT/APInt.h
+++ llvm/include/llvm/ADT/APInt.h
@@ -2212,6 +2212,15 @@
 // See friend declaration above. This additional declaration is required in
 // order to compile LLVM with IBM xlC compiler.
 hash_code hash_value(const APInt &Arg);
-} // End of llvm namespace
+
+/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
+/// with the integer held in IntVal.
+void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes);
+
+/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
+/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
+void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes);
+
+} // namespace llvm
 
 #endif
Index: clang/tools/libclang/CXCursor.cpp
===================================================================
--- clang/tools/libclang/CXCursor.cpp
+++ clang/tools/libclang/CXCursor.cpp
@@ -716,6 +716,8 @@
   case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
     K = CXCursor_OMPTargetTeamsDistributeSimdDirective;
     break;
+  case Stmt::BuiltinBitCastExprClass:
+    K = CXCursor_BuiltinBitCastExpr;
+    break;
   }
 
   CXCursor C = { K, 0, { Parent, S, TU } };
Index: clang/tools/libclang/CIndex.cpp
===================================================================
--- clang/tools/libclang/CIndex.cpp
+++ clang/tools/libclang/CIndex.cpp
@@ -5190,6 +5190,8 @@
       return cxstring::createRef("CallExpr");
   case CXCursor_ObjCMessageExpr:
       return cxstring::createRef("ObjCMessageExpr");
+  case CXCursor_BuiltinBitCastExpr:
+    return cxstring::createRef("BuiltinBitCastExpr");
   case CXCursor_UnexposedStmt:
       return cxstring::createRef("UnexposedStmt");
   case CXCursor_DeclStmt:
Index: clang/test/SemaCXX/constexpr-builtin-bit-cast.cpp
===================================================================
--- /dev/null
+++ clang/test/SemaCXX/constexpr-builtin-bit-cast.cpp
@@ -0,0 +1,212 @@
+// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s
+// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple aarch64_be-linux-gnu %s
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#  define LITTLE_END 1
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#  define LITTLE_END 0
+#else
+#  error "huh?"
+#endif
+
+template <class T, class V> struct is_same {
+  static constexpr bool value = false;
+};
+template <class T> struct is_same<T, T> {
+  static constexpr bool value = true;
+};
+
+static_assert(sizeof(int) == 4);
+static_assert(sizeof(long long) == 8);
+
+template <class To, class From>
+constexpr To bit_cast(const From &from) {
+  static_assert(sizeof(To) == sizeof(From));
+  static_assert(!is_same<To, From>::value);
+  return __builtin_bit_cast(To, from);
+}
+
+template <class Intermediate, class Init>
+constexpr int round_trip(const Init &init) {
+  return bit_cast<Init>(bit_cast<Intermediate>(init)) == init;
+}
+
+void test_int() {
+  static_assert(round_trip<unsigned>((int)-1));
+  static_assert(round_trip<unsigned>((int)0x12345678));
+  static_assert(round_trip<unsigned>((int)0x87654321));
+  static_assert(round_trip<unsigned>((int)0x0C05FEFE));
+}
+
+void test_array() {
+  constexpr unsigned char input[] = {0xCA, 0xFE, 0xBA, 0xBE};
+  constexpr unsigned expected = LITTLE_END ? 0xBEBAFECA : 0xCAFEBABE;
+  static_assert(bit_cast<unsigned>(input) == expected);
+}
+
+void test_record() {
+  struct int_splicer {
+    unsigned x;
+    unsigned y;
+
+    constexpr bool operator==(const int_splicer &other) const {
+      return other.x == x && other.y == y;
+    }
+  };
+
+  constexpr int_splicer splice{0x0C05FEFE, 0xCAFEBABE};
+
+  static_assert(bit_cast<unsigned long long>(splice) == (LITTLE_END
+                ? 0xCAFEBABE0C05FEFE
+                : 0x0C05FEFECAFEBABE));
+
+  static_assert(bit_cast<int_splicer>(0xCAFEBABE0C05FEFE).x == (LITTLE_END
+                ? 0x0C05FEFE
+                : 0xCAFEBABE));
+
+  static_assert(round_trip<unsigned long long>(splice));
+  static_assert(round_trip<long long>(splice));
+
+  struct base2 {
+  };
+
+  struct base3 {
+    unsigned z;
+  };
+
+  struct bases : int_splicer, base2, base3 {
+    unsigned doublez;
+  };
+
+  struct tuple4 {
+    unsigned x, y, z, doublez;
+
+    constexpr bool operator==(tuple4 const &other) const {
+      return x == other.x && y == other.y &&
+             z == other.z && doublez == other.doublez;
+    }
+  };
+  constexpr bases b = {{1, 2}, {}, {3}, 4};
+  constexpr tuple4 t4 = bit_cast<tuple4>(b);
+  static_assert(t4 == tuple4{1, 2, 3, 4});
+  static_assert(round_trip<tuple4>(b));
+}
+
+void test_partially_initialized() {
+  struct pad {
+    char x;
+    int y;
+  };
+
+  struct no_pad {
+    char x;
+    char p1, p2, p3; // expected-note {{subobject declared here}}
+    int y;
+  };
+
+  static_assert(sizeof(pad) == sizeof(no_pad));
+
+  constexpr pad pir{4, 4};
+  constexpr int piw = bit_cast<no_pad>(pir).x;
+  static_assert(piw == 4);
+
+  // expected-error@+2 {{constexpr variable 'bad' must be initialized by a constant expression}}
+  // expected-note@+1 {{subobject of type 'char' is not initialized}}
+  constexpr no_pad bad = bit_cast<no_pad>(pir);
+
+  constexpr pad fine = bit_cast<pad>(no_pad{1, 2, 3, 4, 5});
+  static_assert(fine.x == 1 && fine.y == 5);
+}
+
+void no_bitfields() {
+  // FIXME!
+  struct S {
+    unsigned char x : 8;
+  };
+
+  struct G {
+    unsigned char x : 8;
+  };
+
+  constexpr S s{0};
+  // expected-error@+2 {{constexpr variable 'g' must be initialized by a constant expression}}
+  // expected-note@+1 {{constexpr bit_cast involving bit-field is not yet supported}}
+  constexpr G g = __builtin_bit_cast(G, s);
+}
+
+void array_members() {
+  struct S {
+    int ar[3];
+
+    constexpr bool operator==(const S &rhs) {
+      return ar[0] == rhs.ar[0] && ar[1] == rhs.ar[1] && ar[2] == rhs.ar[2];
+    }
+  };
+
+  struct G {
+    int a, b, c;
+
+    constexpr bool operator==(const G &rhs) {
+      return a == rhs.a && b == rhs.b && c == rhs.c;
+    }
+  };
+
+  constexpr S s{{1, 2, 3}};
+  constexpr G g = bit_cast<G>(s);
+  static_assert(g.a == 1 && g.b == 2 && g.c == 3);
+
+  static_assert(round_trip<G>(s));
+  static_assert(round_trip<S>(g));
+}
+
+void bad_types() {
+  union X {
+    int x;
+  };
+
+  struct G {
+    int g;
+  };
+  // expected-error@+2 {{constexpr variable 'g' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a union type 'X'}}
+  constexpr G g = __builtin_bit_cast(G, X{0});
+  // expected-error@+2 {{constexpr variable 'x' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a union type 'X'}}
+  constexpr X x = __builtin_bit_cast(X, G{0});
+
+  struct has_pointer {
+    // expected-note@+1 2 {{invalid type 'int *' is a member of 'has_pointer'}}
+    int *ptr;
+  };
+
+  // expected-error@+2 {{constexpr variable 'ptr' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a pointer type 'int *'}}
+  constexpr unsigned long ptr = __builtin_bit_cast(unsigned long, has_pointer{0});
+  // expected-error@+2 {{constexpr variable 'hptr' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a pointer type 'int *'}}
+  constexpr has_pointer hptr = __builtin_bit_cast(has_pointer, 0ul);
+}
+
+void backtrace() {
+  struct A {
+    // expected-note@+1 {{invalid type 'int *' is a member of 'A'}}
+    int *ptr;
+  };
+
+  struct B {
+    // expected-note@+1 {{invalid type 'A [10]' is a member of 'B'}}
+    A as[10];
+  };
+
+  // expected-note@+1 {{invalid type 'B' is a base of 'C'}}
+  struct C : B {
+  };
+
+  struct E {
+    unsigned long ar[10];
+  };
+
+  // expected-error@+2 {{constexpr variable 'e' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a pointer type 'int *'}}
+  constexpr E e = __builtin_bit_cast(E, C{});
+}
Index: clang/test/SemaCXX/builtin-bit-cast.cpp
===================================================================
--- /dev/null
+++ clang/test/SemaCXX/builtin-bit-cast.cpp
@@ -0,0 +1,24 @@
+// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s
+
+template <class T, T v>
+T instantiate() {
+  return __builtin_bit_cast(T, v);
+}
+
+int x = instantiate<int, 32>();
+
+struct secret_ctor {
+  char member;
+
+private: secret_ctor() = default;
+};
+
+void test1() {
+  secret_ctor c = __builtin_bit_cast(secret_ctor, (char)0);
+}
+
+void test2() {
+  constexpr int i = 0;
+  // expected-error@+1{{__builtin_bit_cast source size does not match destination size (4 and 1)}}
+  constexpr char c = __builtin_bit_cast(char, i);
+}
Index: clang/test/CodeGenCXX/builtin-bit-cast.cpp
===================================================================
--- /dev/null
+++ clang/test/CodeGenCXX/builtin-bit-cast.cpp
@@ -0,0 +1,112 @@
+// RUN: %clang_cc1 -std=c++2a -S -emit-llvm -o - -disable-llvm-passes -triple x86_64-apple-macos10.14 %s | FileCheck %s
+
+void test_scalar() {
+  // CHECK-LABEL: define void @_Z11test_scalarv
+
+  __builtin_bit_cast(float, 43);
+
+  // CHECK: [[BIT_CAST_TEMP_INT:%.*]] = alloca i32, align 4
+  // CHECK: [[BIT_CAST_TEMP_FLOAT:%.*]] = alloca float, align 4
+
+  // CHECK: store i32 43, i32* [[BIT_CAST_TEMP_INT]], align 4
+  // ...bitcast the memcpy operands to i8*...
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
+  // CHECK-NEXT: [[TMP:%.*]] = bitcast i8* %1 to float*
+  // CHECK-NEXT: load float, float* [[TMP]], align 4
+}
+
+struct two_ints {
+  int x;
+  int y;
+};
+
+void test_aggregate_to_scalar() {
+  // CHECK-LABEL: define void @_Z24test_aggregate_to_scalarv
+  __builtin_bit_cast(unsigned long, two_ints{11, 12});
+
+  // CHECK: [[SRC_TEMP:%.*]] = alloca %struct.two_ints, align 4
+  // CHECK-NEXT: [[DST_TEMP:%.*]] = alloca i64, align 8
+  // CHECK: store i32 11, i32* {{.*}}, align 4
+  // CHECK: store i32 12, i32* {{.*}}, align 4
+  // ...bitcast the memcpy operands to i8*...
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
+
+  // CHECK: [[TMP:%.*]] = bitcast i8* {{.*}} to i64*
+  // CHECK: load i64, i64* [[TMP]], align 8
+}
+
+void test_aggregate_to_scalar_lv() {
+  // CHECK-LABEL: define void @_Z27test_aggregate_to_scalar_lvv
+  two_ints lv{11, 12}; // CHECK: alloca
+  __builtin_bit_cast(unsigned long, lv);
+
+  // CHECK: [[SOURCE:%.*]] = alloca %struct.two_ints, align 4
+  // CHECK-NEXT: [[DEST:%.*]] = alloca i64, align 8
+  // CHECK-NEXT: [[LVTMP:%.*]] = bitcast %struct.two_ints* {{.*}} to i8*
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[LVTMP]]
+  // CHECK: [[SRC_TEMP:%.*]] = bitcast %struct.two_ints* [[SOURCE]] to i8*
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[SRC_TEMP]]
+  // CHECK: bitcast
+  // CHECK: [[DEST_TEMP:%.*]] = bitcast i64* [[DEST]] to i8*
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DEST_TEMP]]
+}
+
+struct two_floats {
+  float x;
+  float y;
+};
+
+void test_aggregate_record() {
+  // CHECK-LABEL: define void @_Z21test_aggregate_recordv
+  two_floats tf = __builtin_bit_cast(two_floats, two_ints{1, 2});
+
+  // CHECK: [[TF:%.*]] = alloca %struct.two_floats, align 4
+  // CHECK-NEXT: [[TFTMP:%.*]] = bitcast %struct.two_floats* [[TF]] to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TFTMP]],
+}
+
+void test_aggregate_array() {
+  // CHECK-LABEL: define void @_Z20test_aggregate_arrayv
+  two_floats tf = __builtin_bit_cast(two_floats, (int [2]) {42, 32});
+
+  // CHECK: [[TF:%.*]] = alloca %struct.two_floats, align 4
+  // CHECK-NEXT: [[TFTMP:%.*]] = bitcast %struct.two_floats* [[TF]] to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TFTMP]],
+}
+
+void test_scalar_to_aggregate() {
+  // CHECK-LABEL: define void @_Z24test_scalar_to_aggregatev
+  two_ints ti = __builtin_bit_cast(two_ints, 24ul);
+
+  // CHECK: [[TI:%.*]] = alloca %struct.two_ints, align 4
+  // CHECK-NEXT: [[TITMP:%.*]] = bitcast %struct.two_ints* [[TI]] to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TITMP]]
+}
+
+void test_complex() {
+  // CHECK-LABEL: define void @_Z12test_complexv
+  using complex_t = _Complex unsigned;
+  __builtin_bit_cast(unsigned long, complex_t(43));
+
+  // CHECK: [[SOURCE:%.*]] = alloca { i32, i32 }, align 4
+  // CHECK-NEXT: [[DEST:%.*]] = alloca i64, align 8
+  // CHECK: bitcast { i32, i32 }* [[SOURCE]] to i8*
+  // CHECK-NEXT: [[DEST_TEMP:%.*]] = bitcast i64* [[DEST]] to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DEST_TEMP]]
+}
+
+void test_to_complex() {
+  // CHECK-LABEL: define void @_Z15test_to_complexv
+
+  using complex_t = _Complex unsigned;
+  auto ul = __builtin_bit_cast(complex_t, 32ul);
+
+  // CHECK: [[UL:%.*]] = alloca { i32, i32 }
+  // CHECK-NEXT: [[BC_SRC:%.*]] = alloca i64, align 8
+  // CHECK-NEXT: [[BC_DST:%.*]] = alloca { i32, i32 }, align 4
+  // CHECK-NEXT: store i64 32, i64* [[BC_SRC]], align 8
+  // CHECK-NEXT: bitcast i64* [[BC_SRC]] to i8*
+  // CHECK-NEXT: [[DST_TEMP:%.*]] = bitcast { i32, i32 }* [[BC_DST]] to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DST_TEMP]]
+  // CHECK-NEXT: bitcast i8* [[DST_TEMP]] to { i32, i32 }*
+}
Index: clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
===================================================================
--- clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -376,6 +376,7 @@
       case CK_Dependent:
       case CK_ArrayToPointerDecay:
       case CK_BitCast:
+      case CK_CXXBitCast:
       case CK_AddressSpaceConversion:
       case CK_BooleanToSignedIntegral:
       case CK_NullToPointer:
Index: clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
===================================================================
--- clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1288,7 +1288,8 @@
     case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
     case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
     case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
-    case Stmt::CapturedStmtClass: {
+    case Stmt::CapturedStmtClass:
+    case Stmt::BuiltinBitCastExprClass: {
       const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
       Engine.addAbortedBlock(node, currBldrCtx->getBlock());
       break;
Index: clang/lib/Serialization/ASTWriterStmt.cpp
===================================================================
--- clang/lib/Serialization/ASTWriterStmt.cpp
+++ clang/lib/Serialization/ASTWriterStmt.cpp
@@ -1432,6 +1432,12 @@
   Code = serialization::EXPR_CXX_FUNCTIONAL_CAST;
 }
 
+void ASTStmtWriter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
+  VisitExplicitCastExpr(E);
+  Record.AddSourceLocation(E->getBeginLoc());
+  Record.AddSourceLocation(E->getEndLoc());
+}
+
 void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
   VisitCallExpr(E);
   Record.AddSourceLocation(E->UDSuffixLoc);
Index: clang/lib/Serialization/ASTReaderStmt.cpp
===================================================================
--- clang/lib/Serialization/ASTReaderStmt.cpp
+++ clang/lib/Serialization/ASTReaderStmt.cpp
@@ -1458,6 +1458,12 @@
   E->setRParenLoc(ReadSourceLocation());
 }
 
+void ASTStmtReader::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
+  VisitExplicitCastExpr(E);
+  E->KWLoc = ReadSourceLocation();
+  E->RParenLoc = ReadSourceLocation();
+}
+
 void ASTStmtReader::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
   VisitCallExpr(E);
   E->UDSuffixLoc = ReadSourceLocation();
Index: clang/lib/Sema/TreeTransform.h
===================================================================
--- clang/lib/Sema/TreeTransform.h
+++ clang/lib/Sema/TreeTransform.h
@@ -10244,6 +10244,22 @@
       E->getAngleBrackets().getEnd(), SubExpr.get(), E->getRParenLoc());
 }
 
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformBuiltinBitCastExpr(BuiltinBitCastExpr *BCE) {
+  TypeSourceInfo *TSI =
+      getDerived().TransformType(BCE->getTypeInfoAsWritten());
+  if (!TSI)
+    return ExprError();
+
+  ExprResult Sub = getDerived().TransformExpr(BCE->getSubExpr());
+  if (Sub.isInvalid())
+    return ExprError();
+
+  return getSema().BuildBuiltinBitCastExpr(BCE->getBeginLoc(), TSI, Sub.get(),
+                                           BCE->getEndLoc());
+}
+
 template<typename Derived>
 ExprResult
 TreeTransform<Derived>::TransformCXXStaticCastExpr(CXXStaticCastExpr *E) {
Index: clang/lib/Sema/SemaExceptionSpec.cpp
===================================================================
--- clang/lib/Sema/SemaExceptionSpec.cpp
+++ clang/lib/Sema/SemaExceptionSpec.cpp
@@ -1204,6 +1204,7 @@
   case Expr::ShuffleVectorExprClass:
   case Expr::ConvertVectorExprClass:
   case Expr::VAArgExprClass:
+  case Expr::BuiltinBitCastExprClass:
     return canSubExprsThrow(*this, E);
 
     // Some might be dependent for other reasons.
Index: clang/lib/Sema/SemaCast.cpp
===================================================================
--- clang/lib/Sema/SemaCast.cpp
+++ clang/lib/Sema/SemaCast.cpp
@@ -87,6 +87,7 @@
     void CheckDynamicCast();
     void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
     void CheckCStyleCast();
+    void CheckBitCast();
 
     void updatePartOfExplicitCastFlags(CastExpr *CE) {
       // Walk down from the CE to the OrigSrcExpr, and mark all immediate
@@ -331,6 +332,38 @@
   }
 }
 
+ExprResult Sema::ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &D,
+                                         ExprResult Operand,
+                                         SourceLocation RParenLoc) {
+  assert(!D.isInvalidType());
+
+  TypeSourceInfo *TInfo = GetTypeForDeclaratorCast(D, Operand.get()->getType());
+  if (D.isInvalidType())
+    return ExprError();
+
+  return BuildBuiltinBitCastExpr(KWLoc, TInfo, Operand.get(), RParenLoc);
+}
+
+ExprResult Sema::BuildBuiltinBitCastExpr(SourceLocation KWLoc,
+                                         TypeSourceInfo *TSI, Expr *Operand,
+                                         SourceLocation RParenLoc) {
+  CastOperation Op(*this, TSI->getType(), Operand);
+  Op.OpRange = SourceRange(KWLoc, RParenLoc);
+  TypeLoc TL = TSI->getTypeLoc();
+  Op.DestRange = SourceRange(TL.getBeginLoc(), TL.getEndLoc());
+
+  if (!Operand->isTypeDependent() && !TSI->getType()->isDependentType()) {
+    Op.CheckBitCast();
+    if (Op.SrcExpr.isInvalid())
+      return ExprError();
+  }
+
+  BuiltinBitCastExpr *BCE =
+      new (Context) BuiltinBitCastExpr(Op.ResultType, Op.ValueKind, Op.Kind,
+                                       Op.SrcExpr.get(), TSI, KWLoc, RParenLoc);
+  return Op.complete(BCE);
+}
+
 /// Try to diagnose a failed overloaded cast.  Returns true if
 /// diagnostics were emitted.
 static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
@@ -953,7 +986,7 @@
 
   unsigned msg = diag::err_bad_cxx_cast_generic;
   TryCastResult tcr =
-    TryReinterpretCast(Self, SrcExpr, DestType,
+      TryReinterpretCast(Self, SrcExpr, DestType,
                        /*CStyle*/false, OpRange, msg, Kind);
   if (tcr != TC_Success && msg != 0) {
     if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
@@ -2764,6 +2797,40 @@
     checkCastAlign();
 }
 
+void CastOperation::CheckBitCast() {
+  if (isPlaceholder())
+    SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
+
+  if (DestType->isDependentType() || SrcExpr.get()->isTypeDependent() ||
+      SrcExpr.get()->isValueDependent())
+    return;
+
+  QualType SrcType = SrcExpr.get()->getType();
+  CharUnits DestSize = Self.Context.getTypeSizeInChars(DestType);
+  CharUnits SourceSize = Self.Context.getTypeSizeInChars(SrcType);
+  if (DestSize != SourceSize) {
+    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_type_size_mismatch)
+        << (int)SourceSize.getQuantity() << (int)DestSize.getQuantity();
+    SrcExpr = ExprError();
+    return;
+  }
+
+  if (!DestType.isTriviallyCopyableType(Self.Context)) {
+    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_non_trivially_copyable)
+        << 1;
+    SrcExpr = ExprError();
+    return;
+  }
+  if (!SrcType.isTriviallyCopyableType(Self.Context)) {
+    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_non_trivially_copyable)
+        << 0;
+    SrcExpr = ExprError();
+    return;
+  }
+
+  Kind = CK_CXXBitCast;
+}
+
 /// DiagnoseCastQual - Warn whenever casts discards a qualifiers, be it either
 /// const, volatile or both.
 static void DiagnoseCastQual(Sema &Self, const ExprResult &SrcExpr,
Index: clang/lib/Parse/ParseExprCXX.cpp
===================================================================
--- clang/lib/Parse/ParseExprCXX.cpp
+++ clang/lib/Parse/ParseExprCXX.cpp
@@ -3487,3 +3487,37 @@
   ConsumeAnyToken();
   return Result;
 }
+
+/// Parse a __builtin_bit_cast(T, E).
+ExprResult Parser::ParseBuiltinBitCast() {
+  SourceLocation KWLoc = ConsumeToken();
+
+  BalancedDelimiterTracker T(*this, tok::l_paren);
+  if (T.expectAndConsume(diag::err_expected_lparen_after, "__builtin_bit_cast"))
+    return ExprError();
+
+  // Parse the common declaration-specifiers piece.
+  DeclSpec DS(AttrFactory);
+  ParseSpecifierQualifierList(DS);
+
+  // Parse the abstract-declarator, if present.
+  Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+  ParseDeclarator(DeclaratorInfo);
+
+  if (ExpectAndConsume(tok::comma)) {
+    Diag(Tok.getLocation(), diag::err_expected) << tok::comma;
+    SkipUntil(tok::r_paren, StopAtSemi);
+    return ExprError();
+  }
+
+  ExprResult Operand = ParseExpression();
+
+  if (T.consumeClose())
+    return ExprError();
+
+  if (Operand.isInvalid() || DeclaratorInfo.isInvalidType())
+    return ExprError();
+
+  return Actions.ActOnBuiltinBitCastExpr(KWLoc, DeclaratorInfo, Operand,
+                                         T.getCloseLocation());
+}
Index: clang/lib/Parse/ParseExpr.cpp
===================================================================
--- clang/lib/Parse/ParseExpr.cpp
+++ clang/lib/Parse/ParseExpr.cpp
@@ -1223,6 +1223,9 @@
   case tok::kw_static_cast:
     Res = ParseCXXCasts();
     break;
+  case tok::kw___builtin_bit_cast:
+    Res = ParseBuiltinBitCast();
+    break;
   case tok::kw_typeid:
     Res = ParseCXXTypeid();
     break;
Index: clang/lib/Edit/RewriteObjCFoundationAPI.cpp
===================================================================
--- clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1079,6 +1079,7 @@
     case CK_BuiltinFnToFnPtr:
     case CK_ZeroToOCLOpaqueType:
     case CK_IntToOCLSampler:
+    case CK_CXXBitCast:
       return false;
 
     case CK_BooleanToSignedIntegral:
Index: clang/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- clang/lib/CodeGen/CGExprScalar.cpp
+++ clang/lib/CodeGen/CGExprScalar.cpp
@@ -2040,7 +2040,8 @@
     Value *Src = Visit(const_cast<Expr*>(E));
     llvm::Type *SrcTy = Src->getType();
     llvm::Type *DstTy = ConvertType(DestTy);
-    if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
+    if (Kind != CK_CXXBitCast &&
+        SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
         SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
       llvm_unreachable("wrong cast for pointers in different address spaces"
                        "(must be an address space cast)!");
@@ -2080,6 +2081,22 @@
 
     return Builder.CreateBitCast(Src, DstTy);
   }
+  case CK_CXXBitCast: {
+    Address Source = CGF.CreateIRTemp(E->getType(), "bit_cast.srctemp");
+    CGF.EmitAnyExprToMem(E, Source, E->getType().getQualifiers(),
+                         /*isInit=*/false);
+    Address Dest = CGF.CreateIRTemp(DestTy, "bit_cast.dsttemp");
+    llvm::Value *Size = llvm::ConstantInt::get(
+        CGF.SizeTy, CGF.getContext().getTypeSizeInChars(DestTy).getQuantity());
+    bool IsVolatile =
+        E->getType().isVolatileQualified() || DestTy.isVolatileQualified();
+
+    Source = Builder.CreateElementBitCast(Source, CGF.Int8Ty);
+    Dest = Builder.CreateElementBitCast(Dest, CGF.Int8Ty);
+    Builder.CreateMemCpy(Dest, Source, Size, IsVolatile);
+    Dest = Builder.CreateElementBitCast(Dest, CGF.ConvertType(DestTy));
+    return EmitLoadOfLValue(CGF.MakeAddrLValue(Dest, DestTy), E->getExprLoc());
+  }
   case CK_AddressSpaceConversion: {
     Expr::EvalResult Result;
     if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
Index: clang/lib/CodeGen/CGExprConstant.cpp
===================================================================
--- clang/lib/CodeGen/CGExprConstant.cpp
+++ clang/lib/CodeGen/CGExprConstant.cpp
@@ -838,6 +838,7 @@
     // These don't need to be handled here because Evaluate knows how to
     // evaluate them in the cases where they can be folded.
     case CK_BitCast:
+    case CK_CXXBitCast:
     case CK_ToVoid:
     case CK_Dynamic:
     case CK_LValueBitCast:
Index: clang/lib/CodeGen/CGExprComplex.cpp
===================================================================
--- clang/lib/CodeGen/CGExprComplex.cpp
+++ clang/lib/CodeGen/CGExprComplex.cpp
@@ -464,6 +464,23 @@
     return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
   }
 
+  case CK_CXXBitCast: {
+    Address Source = CGF.CreateIRTemp(Op->getType(), "bit_cast.srctemp");
+    CGF.EmitAnyExprToMem(Op, Source, Op->getType().getQualifiers(),
+                         /*isInit=*/false);
+    Address Dest = CGF.CreateIRTemp(DestTy, "bit_cast.dsttemp");
+    llvm::Value *Size = llvm::ConstantInt::get(
+        CGF.SizeTy, CGF.getContext().getTypeSizeInChars(DestTy).getQuantity());
+    bool IsVolatile =
+        Op->getType().isVolatileQualified() || DestTy.isVolatileQualified();
+
+    Source = Builder.CreateElementBitCast(Source, CGF.Int8Ty);
+    Dest = Builder.CreateElementBitCast(Dest, CGF.Int8Ty);
+    Builder.CreateMemCpy(Dest, Source, Size, IsVolatile);
+    Dest = Builder.CreateElementBitCast(Dest, CGF.ConvertType(DestTy));
+    return EmitLoadOfLValue(CGF.MakeAddrLValue(Dest, DestTy), Op->getExprLoc());
+  }
+
   case CK_BitCast:
   case CK_BaseToDerived:
   case CK_DerivedToBase:
Index: clang/lib/CodeGen/CGExprAgg.cpp
===================================================================
--- clang/lib/CodeGen/CGExprAgg.cpp
+++ clang/lib/CodeGen/CGExprAgg.cpp
@@ -80,6 +80,8 @@
   void EmitFinalDestCopy(QualType type, const LValue &src,
                          ExprValueKind SrcValueKind = EVK_NonRValue);
   void EmitFinalDestCopy(QualType type, RValue src);
+  void EmitTrivialFinalDestCopy(QualType Ty, const LValue &Source);
+  void EmitTrivialFinalDestCopy(QualType Ty, RValue Source);
   void EmitCopy(QualType type, const AggValueSlot &dest,
                 const AggValueSlot &src);
 
@@ -342,11 +344,20 @@
     }
   }
 
-  AggValueSlot srcAgg =
-    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
-                            needsGC(type), AggValueSlot::IsAliased,
-                            AggValueSlot::MayOverlap);
-  EmitCopy(type, Dest, srcAgg);
+  EmitTrivialFinalDestCopy(type, src);
+}
+
+void AggExprEmitter::EmitTrivialFinalDestCopy(QualType Ty, RValue Source) {
+  LValue SourceLValue = CGF.MakeAddrLValue(Source.getAggregateAddress(), Ty);
+  EmitTrivialFinalDestCopy(Ty, SourceLValue);
+}
+
+void AggExprEmitter::EmitTrivialFinalDestCopy(QualType Ty,
+                                              const LValue &Source) {
+  AggValueSlot srcAgg = AggValueSlot::forLValue(
+      Source, AggValueSlot::IsDestructed, needsGC(Ty),
+      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
+  EmitCopy(Ty, Dest, srcAgg);
 }
 
 /// Perform a copy from the source into the destination.
@@ -807,6 +818,25 @@
     Visit(E->getSubExpr());
     break;
 
+  case CK_CXXBitCast: {
+    if (Dest.isIgnored()) {
+      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
+                      /*IgnoreResult=*/true);
+      break;
+    }
+
+    QualType SourceType = E->getSubExpr()->getType();
+    QualType DestType = E->getType();
+    Address SourceTemp = CGF.CreateIRTemp(SourceType, "bit_cast.srctemp");
+    CGF.EmitAnyExprToMem(E->getSubExpr(), SourceTemp,
+                         SourceType.getQualifiers(), false);
+    Address Casted =
+        Builder.CreateElementBitCast(SourceTemp, CGF.ConvertType(DestType));
+    RValue CastedRV =
+        RValue::getAggregate(Casted, DestType.isVolatileQualified());
+    return EmitTrivialFinalDestCopy(DestType, CastedRV);
+  }
+
   case CK_LValueBitCast:
     llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
 
Index: clang/lib/CodeGen/CGExpr.cpp
===================================================================
--- clang/lib/CodeGen/CGExpr.cpp
+++ clang/lib/CodeGen/CGExpr.cpp
@@ -4130,6 +4130,7 @@
   switch (E->getCastKind()) {
   case CK_ToVoid:
   case CK_BitCast:
+  case CK_CXXBitCast:
   case CK_ArrayToPointerDecay:
   case CK_FunctionToPointerDecay:
   case CK_NullToMemberPointer:
Index: clang/lib/AST/StmtProfile.cpp
===================================================================
--- clang/lib/AST/StmtProfile.cpp
+++ clang/lib/AST/StmtProfile.cpp
@@ -1569,6 +1569,11 @@
   VisitCXXNamedCastExpr(S);
 }
 
+void StmtProfiler::VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *S) {
+  VisitExpr(S);
+  VisitType(S->getTypeInfoAsWritten()->getType());
+}
+
 void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
   VisitCallExpr(S);
 }
Index: clang/lib/AST/StmtPrinter.cpp
===================================================================
--- clang/lib/AST/StmtPrinter.cpp
+++ clang/lib/AST/StmtPrinter.cpp
@@ -1710,6 +1710,14 @@
   VisitCXXNamedCastExpr(Node);
 }
 
+void StmtPrinter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *Node) {
+  OS << "__builtin_bit_cast(";
+  Node->getTypeInfoAsWritten()->getType().print(OS, Policy);
+  OS << ", ";
+  PrintExpr(Node->getSubExpr());
+  OS << ")";
+}
+
 void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
   OS << "typeid(";
   if (Node->isTypeOperand()) {
Index: clang/lib/AST/ItaniumMangle.cpp
===================================================================
--- clang/lib/AST/ItaniumMangle.cpp
+++ clang/lib/AST/ItaniumMangle.cpp
@@ -3618,6 +3618,7 @@
   case Expr::AtomicExprClass:
   case Expr::SourceLocExprClass:
   case Expr::FixedPointLiteralClass:
+  case Expr::BuiltinBitCastExprClass:
   {
     if (!NullOut) {
       // As bad as this diagnostic is, it's better than crashing.
Index: clang/lib/AST/ExprConstant.cpp
===================================================================
--- clang/lib/AST/ExprConstant.cpp
+++ clang/lib/AST/ExprConstant.cpp
@@ -48,6 +48,7 @@
 #include "clang/Basic/FixedPoint.h"
 #include "clang/Basic/TargetInfo.h"
 #include "llvm/Support/SaveAndRestore.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cstring>
 #include <functional>
@@ -55,8 +56,10 @@
 #define DEBUG_TYPE "exprconstant"
 
 using namespace clang;
+using llvm::APInt;
 using llvm::APSInt;
 using llvm::APFloat;
+using llvm::Optional;
 
 static bool IsGlobalLValue(APValue::LValueBase B);
 
@@ -5350,6 +5353,418 @@
 //===----------------------------------------------------------------------===//
 namespace {
 
+class APBuffer {
+  // FIXME: We're going to need bit-level granularity when we support
+  // bit-fields.
+  SmallVector<Optional<unsigned char>, 32> Bytes;
+
+  bool TargetIsLittleEndian;
+
+public:
+  APBuffer(CharUnits Width, bool TargetIsLittleEndian)
+      : Bytes(Width.getQuantity()),
+        TargetIsLittleEndian(TargetIsLittleEndian) {}
+
+  bool readObject(CharUnits Offset, CharUnits Width,
+                  SmallVectorImpl<unsigned char> &Output) const {
+    for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
+      // If a byte of an integer is uninitialized, then the whole integer is
+      // uninitialized.
+      if (!Bytes[I.getQuantity()])
+        return false;
+      Output.push_back(*Bytes[I.getQuantity()]);
+    }
+    if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
+      std::reverse(Output.begin(), Output.end());
+    return true;
+  }
+
+  void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) {
+    if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
+      std::reverse(Input.begin(), Input.end());
+
+    size_t Index = 0;
+    for (unsigned char Byte : Input) {
+      assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?");
+      Bytes[Offset.getQuantity() + Index] = Byte;
+      ++Index;
+    }
+  }
+
+  size_t size() { return Bytes.size(); }
+};
+
+// Traverse an APValue to produce an APBuffer, emulating how the current target
+// would represent the value at runtime.
+class BitCastReader {
+  EvalInfo &Info;
+  APBuffer Buffer;
+  const CastExpr *BCE;
+
+  BitCastReader(EvalInfo &Info, CharUnits ObjectWidth, const CastExpr *BCE)
+      : Info(Info),
+        Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()),
+        BCE(BCE) {}
+
+  bool visit(const APValue &Val, QualType Ty) {
+    return visit(Val, Ty, CharUnits::fromQuantity(0));
+  }
+
+  // Write out Val with type Ty into Buffer starting at Offset.
+  bool visit(const APValue &Val, QualType Ty, CharUnits Offset) {
+    assert((size_t)Offset.getQuantity() <= Buffer.size());
+
+    // Dispatch on the kind of Val and serialize it into Buffer at Offset.
+    switch (Val.getKind()) {
+    case APValue::Indeterminate:
+    case APValue::None:
+      Info.FFDiag(BCE->getBeginLoc(), diag::note_constexpr_uninitialized)
+          << true << Ty;
+      return false;
+
+    case APValue::Int:
+      return visitInt(Val.getInt(), Ty, Offset);
+    case APValue::Float:
+      return visitFloat(Val.getFloat(), Ty, Offset);
+    case APValue::Array:
+      return visitArray(Val, Ty, Offset);
+    case APValue::Struct:
+      return visitRecord(Val, Ty, Offset);
+
+    case APValue::ComplexInt:
+    case APValue::ComplexFloat:
+    case APValue::Vector:
+    case APValue::Union:
+    case APValue::MemberPointer:
+    case APValue::AddrLabelDiff:
+    case APValue::FixedPoint: {
+      Info.FFDiag(BCE->getBeginLoc(),
+                  diag::note_constexpr_bit_cast_unsupported_type)
+          << Ty;
+      return false;
+    }
+
+    case APValue::LValue: {
+      LValue LVal;
+      LVal.setFrom(Info.Ctx, Val);
+      APValue RVal;
+      if (!handleLValueToRValueConversion(Info, BCE, Ty.withConst(),
+                                          LVal, RVal))
+        return false;
+      return visit(RVal, Ty, Offset);
+    }
+    }
+    llvm_unreachable("Unhandled APValue::ValueKind");
+  }
+
+  bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) {
+    const RecordDecl *RD = Ty->getAsRecordDecl();
+    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+    // Visit the base classes.
+    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
+        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
+        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
+
+        if (!visitRecord(Val.getStructBase(I), BS.getType(),
+                         Layout.getBaseClassOffset(BaseDecl) + Offset))
+          return false;
+      }
+    }
+
+    // Visit the fields.
+    unsigned FieldIdx = 0;
+    for (FieldDecl *FD : RD->fields()) {
+      if (FD->isBitField()) {
+        Info.FFDiag(BCE->getBeginLoc(),
+                    diag::note_constexpr_bit_cast_unsupported_bitfield);
+        return false;
+      }
+
+      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
+
+      assert(FieldOffsetBits % 8 == 0 &&
+             "only bit-fields can have sub-char alignment");
+      CharUnits FieldOffset =
+          CharUnits::fromQuantity(FieldOffsetBits / 8) + Offset;
+      QualType FieldTy = FD->getType();
+      if (!visit(Val.getStructField(FieldIdx), FieldTy, FieldOffset))
+        return false;
+      ++FieldIdx;
+    }
+
+    return true;
+  }
+
+  bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) {
+    const auto *CAT =
+        dyn_cast_or_null<ConstantArrayType>(Ty->getAsArrayTypeUnsafe());
+    if (!CAT)
+      return false;
+    CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(CAT->getElementType());
+    expandArray(const_cast<APValue &>(Val), Val.getArraySize()-1);
+    for (unsigned I = 0, E = Val.getArraySize(); I != E; ++I) {
+      const APValue &SubObj = Val.getArrayInitializedElt(I);
+      if (!visit(SubObj, CAT->getElementType(), Offset + I * ElemWidth))
+        return false;
+    }
+    return true;
+  }
+
+  bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
+    CharUnits Width = Info.Ctx.getTypeSizeInChars(Ty);
+    SmallVector<unsigned char, 8> Bytes(Width.getQuantity());
+    llvm::StoreIntToMemory(Val, &*Bytes.begin(), Width.getQuantity());
+    Buffer.writeObject(Offset, Bytes);
+    return true;
+  }
+
+  bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) {
+    APSInt AsInt(Val.bitcastToAPInt());
+    return visitInt(AsInt, Ty, Offset);
+  }
+
+public:
+  static Optional<APBuffer> read(EvalInfo &Info, const APValue &Src,
+                                 const CastExpr *BCE) {
+    CharUnits DstSize = Info.Ctx.getTypeSizeInChars(BCE->getType());
+    BitCastReader Reader(Info, DstSize, BCE);
+    if (!Reader.visit(Src, BCE->getSubExpr()->getType()))
+      return None;
+    return Reader.Buffer;
+  }
+};
+
+class BitCastWriter {
+  EvalInfo &Info;
+  const APBuffer &Buffer;
+  const CastExpr *BCE;
+
+  BitCastWriter(EvalInfo &Info, const APBuffer &Buffer, const CastExpr *BCE)
+      : Info(Info), Buffer(Buffer), BCE(BCE) {}
+
+  // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast
+  // with an invalid type, so anything left is a deficiency on our part (FIXME).
+  // Ideally this will be unreachable.
+  llvm::NoneType unsupportedType(QualType Ty) {
+    Info.FFDiag(BCE->getBeginLoc(),
+                diag::note_constexpr_bit_cast_unsupported_type)
+        << Ty;
+    return None;
+  }
+
+  Optional<APValue> visit(const BuiltinType *T, CharUnits Offset) {
+    CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T);
+    SmallVector<uint8_t, 8> Bytes;
+    if (!Buffer.readObject(Offset, SizeOf, Bytes))
+      return APValue();
+
+    APSInt Val(SizeOf.getQuantity() * 8, true);
+    llvm::LoadIntFromMemory(Val, &*Bytes.begin(), Bytes.size());
+
+    if (T->isIntegralOrEnumerationType()) {
+      Val.setIsSigned(T->isSignedIntegerOrEnumerationType());
+      return APValue(Val);
+    }
+
+    if (T->isRealFloatingType()) {
+      const llvm::fltSemantics &Semantics =
+          Info.Ctx.getFloatTypeSemantics(QualType(T, 0));
+      return APValue(APFloat(Semantics, Val));
+    }
+
+    return unsupportedType(QualType(T, 0));
+  }
+
+  Optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
+    const RecordDecl *RD = RTy->getAsRecordDecl();
+    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+    unsigned NumBases = 0;
+    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+      NumBases = CXXRD->getNumBases();
+
+    APValue ResultVal(APValue::UninitStruct(), NumBases,
+                      std::distance(RD->field_begin(), RD->field_end()));
+
+    // Visit the base classes.
+    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
+        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
+        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
+        if (BaseDecl->isEmpty() ||
+            Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
+          continue;
+
+        Optional<APValue> SubObj = visitType(
+            BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset);
+        if (!SubObj)
+          return None;
+        ResultVal.getStructBase(I) = *SubObj;
+      }
+    }
+
+    // Visit the fields.
+    unsigned FieldIdx = 0;
+    for (FieldDecl *FD : RD->fields()) {
+      // FIXME: We don't currently support bit-fields. A lot of the logic for
+      // this is in CodeGen, so we need to factor it around.
+      if (FD->isBitField()) {
+        Info.FFDiag(BCE->getBeginLoc(),
+                    diag::note_constexpr_bit_cast_unsupported_bitfield);
+        return None;
+      }
+
+      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
+      assert(FieldOffsetBits % 8 == 0);
+
+      CharUnits FieldOffset =
+          CharUnits::fromQuantity(FieldOffsetBits / 8) + Offset;
+      QualType FieldTy = FD->getType();
+      Optional<APValue> SubObj = visitType(FieldTy, FieldOffset);
+      if (!SubObj)
+        return None;
+      ResultVal.getStructField(FieldIdx) = *SubObj;
+      ++FieldIdx;
+    }
+
+    return ResultVal;
+  }
+
+  Optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
+    QualType RepresentationType = Ty->getDecl()->getIntegerType();
+    assert(!RepresentationType.isNull() &&
+           "enum forward decl should be caught by Sema");
+    return visitType(RepresentationType, Offset);
+  }
+
+  Optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
+    size_t Size = Ty->getSize().getLimitedValue();
+    CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType());
+
+    APValue ArrayValue(APValue::UninitArray(), Size, Size);
+    for (size_t I = 0; I != Size; ++I) {
+      Optional<APValue> ElementValue =
+          visitType(Ty->getElementType(), Offset + I * ElementWidth);
+      if (!ElementValue)
+        return None;
+      ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue);
+    }
+
+    return ArrayValue;
+  }
+
+  Optional<APValue> visit(const Type *Ty, CharUnits Offset) {
+    return unsupportedType(QualType(Ty, 0));
+  }
+
+  Optional<APValue> visitType(QualType Ty, CharUnits Offset) {
+    QualType Can = Ty.getCanonicalType();
+
+    switch (Can->getTypeClass()) {
+#define TYPE(Class, Base)                                                      \
+  case Type::Class:                                                            \
+    return visit(cast<Class##Type>(Can.getTypePtr()), Offset);
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)                                        \
+  case Type::Class:                                                            \
+    llvm_unreachable("non-canonical type should be impossible!");
+#define DEPENDENT_TYPE(Class, Base)                                            \
+  case Type::Class:                                                            \
+    llvm_unreachable(                                                          \
+        "dependent types aren't supported in the constant evaluator!");
+#define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base)                            \
+  case Type::Class:                                                            \
+    llvm_unreachable("either dependent or not canonical!");
+#include "clang/AST/TypeNodes.def"
+    }
+    llvm_unreachable("Unhandled Type::TypeClass");
+  }
+
+public:
+  // Pull out a full value of type DstType.
+  static Optional<APValue> write(EvalInfo &Info, APBuffer &Buffer,
+                                 const CastExpr *BCE) {
+    BitCastWriter Writer(Info, Buffer, BCE);
+    return Writer.visitType(BCE->getType(), CharUnits::fromQuantity(0));
+  }
+};
+
+static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
+                                                 QualType Ty, EvalInfo *Info,
+                                                 const ASTContext &Ctx) {
+  Ty = Ty.getCanonicalType();
+
+  auto diag = [&](int Reason) {
+    if (Info)
+      Info->FFDiag(Loc, diag::note_constexpr_bit_cast_invalid_type)
+          << Reason << Ty;
+    return false;
+  };
+  auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) {
+    if (Info)
+      Info->Note(NoteLoc, diag::note_constexpr_bit_cast_invalid_subtype)
+          << NoteTy << Construct << Ty;
+    return false;
+  };
+
+  if (Ty->isUnionType())
+    return diag(0);
+  if (Ty->isPointerType())
+    return diag(1);
+  if (Ty->isMemberPointerType())
+    return diag(2);
+  if (Ty.isVolatileQualified())
+    return diag(3);
+
+  if (RecordDecl *Record = Ty->getAsRecordDecl()) {
+    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Record)) {
+      for (CXXBaseSpecifier &BS : CXXRD->bases())
+        if (!checkBitCastConstexprEligibilityType(Loc, BS.getType(), Info, Ctx))
+          return note(1, BS.getType(), BS.getBeginLoc());
+    }
+    for (FieldDecl *FD : Record->fields()) {
+      if (!checkBitCastConstexprEligibilityType(Loc, FD->getType(), Info, Ctx))
+        return note(0, FD->getType(), FD->getBeginLoc());
+    }
+  }
+
+  if (Ty->isArrayType() && !checkBitCastConstexprEligibilityType(
+          Loc, Ctx.getBaseElementType(Ty), Info, Ctx))
+    return false;
+
+  return true;
+}
+
+static bool checkBitCastConstexprEligibility(EvalInfo *Info,
+                                             const ASTContext &Ctx,
+                                             const CastExpr *BCE) {
+  bool DestOK = checkBitCastConstexprEligibilityType(BCE->getBeginLoc(),
+                                                     BCE->getType(), Info, Ctx);
+  bool SourceOK = DestOK && checkBitCastConstexprEligibilityType(
+                                BCE->getBeginLoc(),
+                                BCE->getSubExpr()->getType(), Info, Ctx);
+  return SourceOK;
+}
+
+static bool handleBitCast(EvalInfo &Info, APValue &DestValue,
+                          APValue &SourceValue, const CastExpr *BCE) {
+  if (!checkBitCastConstexprEligibility(&Info, Info.Ctx, BCE))
+    return false;
+
+  // Read out SourceValue into a char buffer.
+  Optional<APBuffer> Buffer = BitCastReader::read(Info, SourceValue, BCE);
+  if (!Buffer)
+    return false;
+
+  // Write out the buffer into a new APValue.
+  Optional<APValue> MaybeDestValue = BitCastWriter::write(Info, *Buffer, BCE);
+  if (!MaybeDestValue)
+    return false;
+
+  DestValue = std::move(*MaybeDestValue);
+  return true;
+}
+
 template <class Derived>
 class ExprEvaluatorBase
   : public ConstStmtVisitor<Derived, bool> {
@@ -5484,6 +5899,9 @@
       CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
     return static_cast<Derived*>(this)->VisitCastExpr(E);
   }
+  bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) {
+    return static_cast<Derived*>(this)->VisitCastExpr(E);
+  }
 
   bool VisitBinaryOperator(const BinaryOperator *E) {
     switch (E->getOpcode()) {
@@ -5776,6 +6194,14 @@
         return false;
       return DerivedSuccess(RVal, E);
     }
+    case CK_CXXBitCast: {
+      APValue DestValue, SourceValue;
+      if (!Evaluate(SourceValue, Info, E->getSubExpr()))
+        return false;
+      if (!handleBitCast(Info, DestValue, SourceValue, E))
+        return false;
+      return DerivedSuccess(DestValue, E);
+    }
     }
 
     return Error(E);
@@ -10570,6 +10996,7 @@
   case CK_LValueToRValue:
   case CK_AtomicToNonAtomic:
   case CK_NoOp:
+  case CK_CXXBitCast:
     return ExprEvaluatorBaseTy::VisitCastExpr(E);
 
   case CK_MemberPointerToBoolean:
@@ -11182,6 +11609,7 @@
   case CK_LValueToRValue:
   case CK_AtomicToNonAtomic:
   case CK_NoOp:
+  case CK_CXXBitCast:
     return ExprEvaluatorBaseTy::VisitCastExpr(E);
 
   case CK_Dependent:
@@ -12477,6 +12905,11 @@
   case Expr::ChooseExprClass: {
     return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx);
   }
+  case Expr::BuiltinBitCastExprClass: {
+    if (!checkBitCastConstexprEligibility(nullptr, Ctx, cast<CastExpr>(E)))
+      return ICEDiag(IK_NotICE, E->getBeginLoc());
+    return CheckICE(cast<CastExpr>(E)->getSubExpr(), Ctx);
+  }
   }
 
   llvm_unreachable("Invalid StmtClass!");
Index: clang/lib/AST/ExprClassification.cpp
===================================================================
--- clang/lib/AST/ExprClassification.cpp
+++ clang/lib/AST/ExprClassification.cpp
@@ -420,6 +420,8 @@
   case Expr::CoawaitExprClass:
   case Expr::CoyieldExprClass:
     return ClassifyInternal(Ctx, cast<CoroutineSuspendExpr>(E)->getResumeExpr());
+  case Expr::BuiltinBitCastExprClass:
+    return Cl::CL_PRValue;
   }
 
   llvm_unreachable("unhandled expression kind in classification");
Index: clang/lib/AST/Expr.cpp
===================================================================
--- clang/lib/AST/Expr.cpp
+++ clang/lib/AST/Expr.cpp
@@ -1739,6 +1739,7 @@
   case CK_UserDefinedConversion:    // operator bool()
   case CK_BuiltinFnToFnPtr:
   case CK_FixedPointToBoolean:
+  case CK_CXXBitCast:
   CheckNoBasePath:
     assert(path_empty() && "Cast kind should not have a base path!");
     break;
@@ -3450,6 +3451,9 @@
     if (IncludePossibleEffects)
       return true;
     break;
+  case BuiltinBitCastExprClass:
+    return cast<BuiltinBitCastExpr>(this)->getSubExpr()->HasSideEffects(
+        Ctx, IncludePossibleEffects);
   }
 
   // Recurse to children.
Index: clang/include/clang/Sema/Sema.h
===================================================================
--- clang/include/clang/Sema/Sema.h
+++ clang/include/clang/Sema/Sema.h
@@ -5194,6 +5194,13 @@
                                SourceRange AngleBrackets,
                                SourceRange Parens);
 
+  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
+                                     ExprResult Operand,
+                                     SourceLocation RParenLoc);
+
+  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
+                                     Expr *Operand, SourceLocation RParenLoc);
+
   ExprResult BuildCXXTypeId(QualType TypeInfoType,
                             SourceLocation TypeidLoc,
                             TypeSourceInfo *Operand,
Index: clang/include/clang/Parse/Parser.h
===================================================================
--- clang/include/clang/Parse/Parser.h
+++ clang/include/clang/Parse/Parser.h
@@ -3059,6 +3059,9 @@
                                  unsigned ArgumentIndex) override;
   void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
   void CodeCompleteNaturalLanguage() override;
+
+  /// Parse a __builtin_bit_cast(T, E).
+  ExprResult ParseBuiltinBitCast();
 };
 
 }  // end namespace clang
Index: clang/include/clang/Basic/TokenKinds.def
===================================================================
--- clang/include/clang/Basic/TokenKinds.def
+++ clang/include/clang/Basic/TokenKinds.def
@@ -669,7 +669,7 @@
 KEYWORD(__builtin_convertvector   , KEYALL)
 ALIAS("__char16_t"   , char16_t   , KEYCXX)
 ALIAS("__char32_t"   , char32_t   , KEYCXX)
-
+KEYWORD(__builtin_bit_cast        , KEYALL)
 KEYWORD(__builtin_available       , KEYALL)
 
 // Clang-specific keywords enabled only in testing.
Index: clang/include/clang/Basic/StmtNodes.td
===================================================================
--- clang/include/clang/Basic/StmtNodes.td
+++ clang/include/clang/Basic/StmtNodes.td
@@ -192,6 +192,7 @@
 def BlockExpr : DStmt<Expr>;
 def OpaqueValueExpr : DStmt<Expr>;
 def TypoExpr : DStmt<Expr>;
+def BuiltinBitCastExpr : DStmt<ExplicitCastExpr>;
 
 // Microsoft Extensions.
 def MSPropertyRefExpr : DStmt<Expr>;
Index: clang/include/clang/Basic/Features.def
===================================================================
--- clang/include/clang/Basic/Features.def
+++ clang/include/clang/Basic/Features.def
@@ -249,6 +249,7 @@
 EXTENSION(pragma_clang_attribute_namespaces, true)
 EXTENSION(pragma_clang_attribute_external_declaration, true)
 EXTENSION(gnu_asm, LangOpts.GNUAsm)
+EXTENSION(builtin_bit_cast, true)
 
 #undef EXTENSION
 #undef FEATURE
Index: clang/include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -9712,4 +9712,8 @@
   "%select{non-pointer|function pointer|void pointer}0 argument to "
   "'__builtin_launder' is not allowed">;
 
+def err_bit_cast_non_trivially_copyable : Error<
+  "__builtin_bit_cast %select{source|destination}0 type must be a trivially copyable">;
+def err_bit_cast_type_size_mismatch : Error<
+  "__builtin_bit_cast source size does not match destination size (%0 and %1)">;
 } // end of sema component.
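A rough sketch of code that would trigger these two errors (illustrative only,
not from the patch's test suite; the sizes assume a 32-bit 'int'):

  struct NotTrivial {
    NotTrivial(const NotTrivial &); // user-provided copy ctor: not trivially copyable
    int x;
  };

  void bad_bit_casts(int i) {
    (void)__builtin_bit_cast(NotTrivial, i); // error: destination type must be
                                             // trivially copyable
    (void)__builtin_bit_cast(char, i);       // error: source size (4) does not
                                             // match destination size (1)
  }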
Index: clang/include/clang/Basic/DiagnosticASTKinds.td
===================================================================
--- clang/include/clang/Basic/DiagnosticASTKinds.td
+++ clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -213,6 +213,15 @@
   "size to copy (%4) is not a multiple of size of element type %3 (%5)|"
   "source is not a contiguous array of at least %4 elements of type %3|"
   "destination is not a contiguous array of at least %4 elements of type %3}2">;
+def note_constexpr_bit_cast_unsupported_type : Note<
+  "constexpr bit_cast involving type %0 is not yet supported">;
+def note_constexpr_bit_cast_unsupported_bitfield : Note<
+  "constexpr bit_cast involving bit-field in not yet supported">;
+def note_constexpr_bit_cast_invalid_type : Note<
+  "cannot constexpr evaluate a bit_cast with a "
+  "%select{union|pointer|member pointer|volatile}0 type %1">;
+def note_constexpr_bit_cast_invalid_subtype : Note<
+  "invalid type %0 is a %select{member|base}1 of %2">;
 
 def warn_integer_constant_overflow : Warning<
   "overflow in expression; result is %0 with type %1">,
Index: clang/include/clang/AST/RecursiveASTVisitor.h
===================================================================
--- clang/include/clang/AST/RecursiveASTVisitor.h
+++ clang/include/clang/AST/RecursiveASTVisitor.h
@@ -2282,6 +2282,10 @@
   TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
 })
 
+DEF_TRAVERSE_STMT(BuiltinBitCastExpr, {
+  TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+})
+
 template <typename Derived>
 bool RecursiveASTVisitor<Derived>::TraverseSynOrSemInitListExpr(
     InitListExpr *S, DataRecursionQueue *Queue) {
Index: clang/include/clang/AST/OperationKinds.def
===================================================================
--- clang/include/clang/AST/OperationKinds.def
+++ clang/include/clang/AST/OperationKinds.def
@@ -66,6 +66,12 @@
 ///    bool b; reinterpret_cast<char&>(b) = 'a';
 CAST_OPERATION(LValueBitCast)
 
+/// CK_CXXBitCast - Represents std::bit_cast. Like CK_BitCast, but with much
+/// weaker semantic requirements: the source and destination types only need to
+/// have the same size and be trivially copyable. There is no CK_LValueBitCast
+/// analog for this, since std::bit_cast always produces a prvalue.
+CAST_OPERATION(CXXBitCast)
+
 /// CK_LValueToRValue - A conversion which causes the extraction of
 /// an r-value from the operand gl-value.  The result of an r-value
 /// conversion is always unqualified.
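To make the contrast with CK_BitCast concrete, an illustrative sketch (not part
of the patch):

  void example(float f) {
    int *p = (int *)&f;                 // CK_BitCast: reinterprets the pointer,
                                        // no bytes are copied
    int i = __builtin_bit_cast(int, f); // CK_CXXBitCast: copies the object
                                        // representation of 'f' into 'i'
    (void)p; (void)i;
  }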
Index: clang/include/clang/AST/ExprCXX.h
===================================================================
--- clang/include/clang/AST/ExprCXX.h
+++ clang/include/clang/AST/ExprCXX.h
@@ -4716,6 +4716,35 @@
   }
 };
 
+/// Represents a C++2a __builtin_bit_cast(T, v) expression. Used to implement
+/// std::bit_cast. These can sometimes be evaluated as part of a constant
+/// expression; otherwise CodeGen lowers them to a simple memcpy.
+class BuiltinBitCastExpr final
+    : public ExplicitCastExpr,
+      private llvm::TrailingObjects<BuiltinBitCastExpr, CXXBaseSpecifier *> {
+  friend class ASTStmtReader;
+  friend class CastExpr;
+  friend class TrailingObjects;
+
+  SourceLocation KWLoc;
+  SourceLocation RParenLoc;
+
+public:
+  BuiltinBitCastExpr(QualType T, ExprValueKind VK, CastKind CK, Expr *SrcExpr,
+                     TypeSourceInfo *DstType, SourceLocation KWLoc,
+                     SourceLocation RParenLoc)
+      : ExplicitCastExpr(BuiltinBitCastExprClass, T, VK, CK, SrcExpr, 0,
+                         DstType),
+        KWLoc(KWLoc), RParenLoc(RParenLoc) {}
+
+  SourceLocation getBeginLoc() const LLVM_READONLY { return KWLoc; }
+  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
+
+  static bool classof(const Stmt *T) {
+    return T->getStmtClass() == BuiltinBitCastExprClass;
+  }
+};
+
 } // namespace clang
 
 #endif // LLVM_CLANG_AST_EXPRCXX_H
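Outside of constant evaluation, the new expression behaves like the familiar
memcpy-based formulation of std::bit_cast; roughly (a sketch of the intended
semantics, not the code this patch emits):

  template <class To, class From>
  To bit_cast_sketch(const From &Src) {
    static_assert(sizeof(To) == sizeof(From), "sizes must match");
    static_assert(__is_trivially_copyable(To), "To must be trivially copyable");
    static_assert(__is_trivially_copyable(From), "From must be trivially copyable");
    To Dst;
    __builtin_memcpy(&Dst, &Src, sizeof(To)); // copy the object representation
    return Dst;
  }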
Index: clang/include/clang-c/Index.h
===================================================================
--- clang/include/clang-c/Index.h
+++ clang/include/clang-c/Index.h
@@ -2536,7 +2536,11 @@
    */
   CXCursor_OMPTargetTeamsDistributeSimdDirective = 279,
 
-  CXCursor_LastStmt = CXCursor_OMPTargetTeamsDistributeSimdDirective,
+  /** C++2a std::bit_cast expression.
+   */
+  CXCursor_BuiltinBitCastExpr = 280,
+
+  CXCursor_LastStmt = CXCursor_BuiltinBitCastExpr,
 
   /**
    * Cursor that represents the translation unit itself.