https://github.com/sarnex created https://github.com/llvm/llvm-project/pull/138000

I'm planning to modify this code soon, so I'm formatting it first so that the
follow-up changes can pass the formatting check.
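
For anyone reproducing this locally, a formatting-only diff like the one below
can typically be generated and verified with clang-format. This is a sketch of
plausible commands, not necessarily the exact invocation used for this patch;
it assumes clang-format and git-clang-format are available in an LLVM checkout:

  # Reformat the two files in place using the in-tree .clang-format (LLVM style)
  clang-format -i clang/lib/AST/Type.cpp clang/lib/CodeGen/CGCall.cpp

  # Or restrict reformatting to lines changed relative to a base commit
  git clang-format origin/main -- clang/lib/AST/Type.cpp clang/lib/CodeGen/CGCall.cpp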

From 3ae86aef81e0ae19e49a4c6db9869e327d4ad4fc Mon Sep 17 00:00:00 2001
From: "Sarnie, Nick" <nick.sar...@intel.com>
Date: Wed, 30 Apr 2025 10:33:58 -0700
Subject: [PATCH] [clang][NFC] Format two files with CallingConv switches

Signed-off-by: Sarnie, Nick <nick.sar...@intel.com>
---
 clang/lib/AST/Type.cpp       | 666 ++++++++++++++++++-----------------
 clang/lib/CodeGen/CGCall.cpp | 469 ++++++++++++------------
 2 files changed, 578 insertions(+), 557 deletions(-)

diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index d298f1cff73cf..59369fba2e772 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -56,17 +56,17 @@ using namespace clang;
 
 bool Qualifiers::isStrictSupersetOf(Qualifiers Other) const {
   return (*this != Other) &&
-    // CVR qualifiers superset
-    (((Mask & CVRMask) | (Other.Mask & CVRMask)) == (Mask & CVRMask)) &&
-    // ObjC GC qualifiers superset
-    ((getObjCGCAttr() == Other.getObjCGCAttr()) ||
-     (hasObjCGCAttr() && !Other.hasObjCGCAttr())) &&
-    // Address space superset.
-    ((getAddressSpace() == Other.getAddressSpace()) ||
-     (hasAddressSpace()&& !Other.hasAddressSpace())) &&
-    // Lifetime qualifier superset.
-    ((getObjCLifetime() == Other.getObjCLifetime()) ||
-     (hasObjCLifetime() && !Other.hasObjCLifetime()));
+         // CVR qualifiers superset
+         (((Mask & CVRMask) | (Other.Mask & CVRMask)) == (Mask & CVRMask)) &&
+         // ObjC GC qualifiers superset
+         ((getObjCGCAttr() == Other.getObjCGCAttr()) ||
+          (hasObjCGCAttr() && !Other.hasObjCGCAttr())) &&
+         // Address space superset.
+         ((getAddressSpace() == Other.getAddressSpace()) ||
+          (hasAddressSpace() && !Other.hasAddressSpace())) &&
+         // Lifetime qualifier superset.
+         ((getObjCLifetime() == Other.getObjCLifetime()) ||
+          (hasObjCLifetime() && !Other.hasObjCLifetime()));
 }
 
 bool Qualifiers::isTargetAddressSpaceSupersetOf(LangAS A, LangAS B,
@@ -105,8 +105,8 @@ bool Qualifiers::isTargetAddressSpaceSupersetOf(LangAS A, LangAS B,
          Ctx.getTargetInfo().isAddressSpaceSupersetOf(A, B);
 }
 
-const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
-  const Type* ty = getTypePtr();
+const IdentifierInfo *QualType::getBaseTypeIdentifier() const {
+  const Type *ty = getTypePtr();
   NamedDecl *ND = nullptr;
   if (ty->isPointerOrReferenceType())
     return ty->getPointeeType().getBaseTypeIdentifier();
@@ -117,8 +117,9 @@ const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
   else if (ty->getTypeClass() == Type::Typedef)
     ND = ty->castAs<TypedefType>()->getDecl();
   else if (ty->isArrayType())
-    return ty->castAsArrayTypeUnsafe()->
-        getElementType().getBaseTypeIdentifier();
+    return ty->castAsArrayTypeUnsafe()
+        ->getElementType()
+        .getBaseTypeIdentifier();
 
   if (ND)
     return ND->getIdentifier();
@@ -147,7 +148,7 @@ bool QualType::isConstant(QualType T, const ASTContext &Ctx) {
 
 std::optional<QualType::NonConstantStorageReason>
 QualType::isNonConstantStorage(const ASTContext &Ctx, bool ExcludeCtor,
-                            bool ExcludeDtor) {
+                               bool ExcludeDtor) {
   if (!isConstant(Ctx) && !(*this)->isReferenceType())
     return NonConstantStorageReason::NonConstNonReferenceType;
   if (!Ctx.getLangOpts().CPlusPlus)
@@ -208,9 +209,10 @@ ConstantArrayType::Create(const ASTContext &Ctx, QualType ET, QualType Can,
       ConstantArrayType(ET, Can, SzPtr, SzMod, Qual);
 }
 
-unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
-                                                 QualType ElementType,
-                                               const llvm::APInt &NumElements) {
+unsigned
+ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
+                                        QualType ElementType,
+                                        const llvm::APInt &NumElements) {
   uint64_t ElementSize = Context.getTypeSizeInChars(ElementType).getQuantity();
 
   // Fast path the common cases so we can avoid the conservative computation
@@ -234,8 +236,8 @@ unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
   // Otherwise, use APSInt to handle arbitrary sized values.
   llvm::APSInt SizeExtended(NumElements, true);
   unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
-  SizeExtended = SizeExtended.extend(std::max(SizeTypeBits,
-                                              SizeExtended.getBitWidth()) * 2);
+  SizeExtended = SizeExtended.extend(
+      std::max(SizeTypeBits, SizeExtended.getBitWidth()) * 2);
 
   llvm::APSInt TotalSize(llvm::APInt(SizeExtended.getBitWidth(), ElementSize));
   TotalSize *= SizeExtended;
@@ -285,11 +287,9 @@ DependentSizedArrayType::DependentSizedArrayType(QualType et, QualType can,
     : ArrayType(DependentSizedArray, et, can, sm, tq, e), SizeExpr((Stmt *)e) {}
 
 void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
-                                      const ASTContext &Context,
-                                      QualType ET,
+                                      const ASTContext &Context, QualType ET,
                                       ArraySizeModifier SizeMod,
-                                      unsigned TypeQuals,
-                                      Expr *E) {
+                                      unsigned TypeQuals, Expr *E) {
   ID.AddPointer(ET.getAsOpaquePtr());
   ID.AddInteger(llvm::to_underlying(SizeMod));
   ID.AddInteger(TypeQuals);
@@ -329,10 +329,10 @@ DependentSizedExtVectorType::DependentSizedExtVectorType(QualType ElementType,
                          : TypeDependence::None)),
       SizeExpr(SizeExpr), ElementType(ElementType), loc(loc) {}
 
-void
-DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
-                                     const ASTContext &Context,
-                                     QualType ElementType, Expr *SizeExpr) {
+void DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
+                                          const ASTContext &Context,
+                                          QualType ElementType,
+                                          Expr *SizeExpr) {
   ID.AddPointer(ElementType.getAsOpaquePtr());
   SizeExpr->Profile(ID, Context, true);
 }
@@ -479,7 +479,8 @@ const Type *Type::getArrayElementTypeNoTypeQual() const {
   // If this is a typedef for an array type, strip the typedef off without
   // losing all typedef information.
   return cast<ArrayType>(getUnqualifiedDesugaredType())
-    ->getElementType().getTypePtr();
+      ->getElementType()
+      .getTypePtr();
 }
 
 /// getDesugaredType - Return the specified type with any "sugar" removed from
@@ -503,8 +504,8 @@ QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
 // Check that no type class is polymorphic. LLVM style RTTI should be used
 // instead. If absolutely needed an exception can still be added here by
 // defining the appropriate macro (but please don't do this).
-#define TYPE(CLASS, BASE) \
-  static_assert(!std::is_polymorphic<CLASS##Type>::value, \
+#define TYPE(CLASS, BASE)                                                      \
+  static_assert(!std::is_polymorphic<CLASS##Type>::value,                      \
                 #CLASS "Type should not be polymorphic!");
 #include "clang/AST/TypeNodes.inc"
 
@@ -519,11 +520,12 @@ QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
 QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
   switch (getTypeClass()) {
 #define ABSTRACT_TYPE(Class, Parent)
-#define TYPE(Class, Parent) \
-  case Type::Class: { \
-    const auto *ty = cast<Class##Type>(this); \
-    if (!ty->isSugared()) return QualType(ty, 0); \
-    return ty->desugar(); \
+#define TYPE(Class, Parent)                                                    \
+  case Type::Class: {                                                          \
+    const auto *ty = cast<Class##Type>(this);                                  \
+    if (!ty->isSugared())                                                      \
+      return QualType(ty, 0);                                                  \
+    return ty->desugar();                                                      \
   }
 #include "clang/AST/TypeNodes.inc"
   }
@@ -538,14 +540,14 @@ SplitQualType QualType::getSplitDesugaredType(QualType T) {
     const Type *CurTy = Qs.strip(Cur);
     switch (CurTy->getTypeClass()) {
 #define ABSTRACT_TYPE(Class, Parent)
-#define TYPE(Class, Parent) \
-    case Type::Class: { \
-      const auto *Ty = cast<Class##Type>(CurTy); \
-      if (!Ty->isSugared()) \
-        return SplitQualType(Ty, Qs); \
-      Cur = Ty->desugar(); \
-      break; \
-    }
+#define TYPE(Class, Parent)                                                    \
+  case Type::Class: {                                                          \
+    const auto *Ty = cast<Class##Type>(CurTy);                                 \
+    if (!Ty->isSugared())                                                      \
+      return SplitQualType(Ty, Qs);                                            \
+    Cur = Ty->desugar();                                                       \
+    break;                                                                     \
+  }
 #include "clang/AST/TypeNodes.inc"
     }
   }
@@ -567,13 +569,14 @@ SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
     // sugared.
     switch (split.Ty->getTypeClass()) {
 #define ABSTRACT_TYPE(Class, Parent)
-#define TYPE(Class, Parent) \
-    case Type::Class: { \
-      const auto *ty = cast<Class##Type>(split.Ty); \
-      if (!ty->isSugared()) goto done; \
-      next = ty->desugar(); \
-      break; \
-    }
+#define TYPE(Class, Parent)                                                    \
+  case Type::Class: {                                                          \
+    const auto *ty = cast<Class##Type>(split.Ty);                              \
+    if (!ty->isSugared())                                                      \
+      goto done;                                                               \
+    next = ty->desugar();                                                      \
+    break;                                                                     \
+  }
 #include "clang/AST/TypeNodes.inc"
     }
 
@@ -586,7 +589,7 @@ SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
     }
   }
 
- done:
+done:
   return SplitQualType(lastTypeWithQuals, quals);
 }
 
@@ -600,19 +603,20 @@ QualType QualType::IgnoreParens(QualType T) {
 /// This will check for a T (which should be a Type which can act as
 /// sugar, such as a TypedefType) by removing any existing sugar until it
 /// reaches a T or a non-sugared type.
-template<typename T> static const T *getAsSugar(const Type *Cur) {
+template <typename T> static const T *getAsSugar(const Type *Cur) {
   while (true) {
     if (const auto *Sugar = dyn_cast<T>(Cur))
       return Sugar;
     switch (Cur->getTypeClass()) {
 #define ABSTRACT_TYPE(Class, Parent)
-#define TYPE(Class, Parent) \
-    case Type::Class: { \
-      const auto *Ty = cast<Class##Type>(Cur); \
-      if (!Ty->isSugared()) return 0; \
-      Cur = Ty->desugar().getTypePtr(); \
-      break; \
-    }
+#define TYPE(Class, Parent)                                                    \
+  case Type::Class: {                                                          \
+    const auto *Ty = cast<Class##Type>(Cur);                                   \
+    if (!Ty->isSugared())                                                      \
+      return 0;                                                                \
+    Cur = Ty->desugar().getTypePtr();                                          \
+    break;                                                                     \
+  }
 #include "clang/AST/TypeNodes.inc"
     }
   }
@@ -651,13 +655,14 @@ const Type *Type::getUnqualifiedDesugaredType() const {
   while (true) {
     switch (Cur->getTypeClass()) {
 #define ABSTRACT_TYPE(Class, Parent)
-#define TYPE(Class, Parent) \
-    case Class: { \
-      const auto *Ty = cast<Class##Type>(Cur); \
-      if (!Ty->isSugared()) return Cur; \
-      Cur = Ty->desugar().getTypePtr(); \
-      break; \
-    }
+#define TYPE(Class, Parent)                                                    \
+  case Class: {                                                                \
+    const auto *Ty = cast<Class##Type>(Cur);                                   \
+    if (!Ty->isSugared())                                                      \
+      return Cur;                                                              \
+    Cur = Ty->desugar().getTypePtr();                                          \
+    break;                                                                     \
+  }
 #include "clang/AST/TypeNodes.inc"
     }
   }
@@ -821,8 +826,9 @@ bool Type::isObjCIdOrObjectKindOfType(const ASTContext &ctx,
     return false;
 
   // Figure out the type bound for the __kindof type.
-  bound = OPT->getObjectType()->stripObjCKindOfTypeAndQuals(ctx)
-            ->getAs<ObjCObjectType>();
+  bound = OPT->getObjectType()
+              ->stripObjCKindOfTypeAndQuals(ctx)
+              ->getAs<ObjCObjectType>();
   return true;
 }
 
@@ -925,8 +931,8 @@ bool ObjCObjectType::isKindOfType() const {
   return false;
 }
 
-QualType ObjCObjectType::stripObjCKindOfTypeAndQuals(
-           const ASTContext &ctx) const {
+QualType
+ObjCObjectType::stripObjCKindOfTypeAndQuals(const ASTContext &ctx) const {
   if (!isKindOfType() && qual_empty())
     return QualType(this, 0);
 
@@ -936,11 +942,11 @@ QualType ObjCObjectType::stripObjCKindOfTypeAndQuals(
   if (const auto *baseObj = splitBaseType.Ty->getAs<ObjCObjectType>())
     baseType = baseObj->stripObjCKindOfTypeAndQuals(ctx);
 
-  return ctx.getObjCObjectType(ctx.getQualifiedType(baseType,
-                                                    splitBaseType.Quals),
-                               getTypeArgsAsWritten(),
-                               /*protocols=*/{},
-                               /*isKindOf=*/false);
+  return ctx.getObjCObjectType(
+      ctx.getQualifiedType(baseType, splitBaseType.Quals),
+      getTypeArgsAsWritten(),
+      /*protocols=*/{},
+      /*isKindOf=*/false);
 }
 
 ObjCInterfaceDecl *ObjCInterfaceType::getDecl() const {
@@ -951,7 +957,7 @@ ObjCInterfaceDecl *ObjCInterfaceType::getDecl() const {
 }
 
 const ObjCObjectPointerType *ObjCObjectPointerType::stripObjCKindOfTypeAndQuals(
-                               const ASTContext &ctx) const {
+    const ASTContext &ctx) const {
   if (!isKindOfType() && qual_empty())
     return this;
 
@@ -987,22 +993,22 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
   // None of the clients of this transformation can occur where
   // there are dependent types, so skip dependent types.
 #define TYPE(Class, Base)
-#define DEPENDENT_TYPE(Class, Base) \
+#define DEPENDENT_TYPE(Class, Base)                                            \
   QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); }
 #include "clang/AST/TypeNodes.inc"
 
-#define TRIVIAL_TYPE_CLASS(Class) \
+#define TRIVIAL_TYPE_CLASS(Class)                                              \
   QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); }
-#define SUGARED_TYPE_CLASS(Class) \
-  QualType Visit##Class##Type(const Class##Type *T) { \
-    if (!T->isSugared()) \
-      return QualType(T, 0); \
-    QualType desugaredType = recurse(T->desugar()); \
-    if (desugaredType.isNull()) \
-      return {}; \
-    if (desugaredType.getAsOpaquePtr() == T->desugar().getAsOpaquePtr()) \
-      return QualType(T, 0); \
-    return desugaredType; \
+#define SUGARED_TYPE_CLASS(Class)                                              \
+  QualType Visit##Class##Type(const Class##Type *T) {                          \
+    if (!T->isSugared())                                                       \
+      return QualType(T, 0);                                                   \
+    QualType desugaredType = recurse(T->desugar());                            \
+    if (desugaredType.isNull())                                                \
+      return {};                                                               \
+    if (desugaredType.getAsOpaquePtr() == T->desugar().getAsOpaquePtr())       \
+      return QualType(T, 0);                                                   \
+    return desugaredType;                                                      \
   }
 
   TRIVIAL_TYPE_CLASS(Builtin)
@@ -1045,8 +1051,8 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (pointeeType.isNull())
       return {};
 
-    if (pointeeType.getAsOpaquePtr()
-          == T->getPointeeTypeAsWritten().getAsOpaquePtr())
+    if (pointeeType.getAsOpaquePtr() ==
+        T->getPointeeTypeAsWritten().getAsOpaquePtr())
       return QualType(T, 0);
 
     return Ctx.getLValueReferenceType(pointeeType, T->isSpelledAsLValue());
@@ -1057,8 +1063,8 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (pointeeType.isNull())
       return {};
 
-    if (pointeeType.getAsOpaquePtr()
-          == T->getPointeeTypeAsWritten().getAsOpaquePtr())
+    if (pointeeType.getAsOpaquePtr() ==
+        T->getPointeeTypeAsWritten().getAsOpaquePtr())
       return QualType(T, 0);
 
     return Ctx.getRValueReferenceType(pointeeType);
@@ -1231,8 +1237,8 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (adjustedType.isNull())
       return {};
 
-    if (originalType.getAsOpaquePtr()
-          == T->getOriginalType().getAsOpaquePtr() &&
+    if (originalType.getAsOpaquePtr() ==
+            T->getOriginalType().getAsOpaquePtr() &&
         adjustedType.getAsOpaquePtr() == T->getAdjustedType().getAsOpaquePtr())
       return QualType(T, 0);
 
@@ -1244,8 +1250,7 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (originalType.isNull())
       return {};
 
-    if (originalType.getAsOpaquePtr()
-          == T->getOriginalType().getAsOpaquePtr())
+    if (originalType.getAsOpaquePtr() == T->getOriginalType().getAsOpaquePtr())
       return QualType(T, 0);
 
     return Ctx.getDecayedType(originalType);
@@ -1278,10 +1283,10 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (equivalentType.isNull())
       return {};
 
-    if (modifiedType.getAsOpaquePtr()
-          == T->getModifiedType().getAsOpaquePtr() &&
-        equivalentType.getAsOpaquePtr()
-          == T->getEquivalentType().getAsOpaquePtr())
+    if (modifiedType.getAsOpaquePtr() ==
+            T->getModifiedType().getAsOpaquePtr() &&
+        equivalentType.getAsOpaquePtr() ==
+            T->getEquivalentType().getAsOpaquePtr())
       return QualType(T, 0);
 
     return Ctx.getAttributedType(T->getAttrKind(), modifiedType, equivalentType,
@@ -1293,8 +1298,8 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (replacementType.isNull())
       return {};
 
-    if (replacementType.getAsOpaquePtr()
-          == T->getReplacementType().getAsOpaquePtr())
+    if (replacementType.getAsOpaquePtr() ==
+        T->getReplacementType().getAsOpaquePtr())
       return QualType(T, 0);
 
     return Ctx.getSubstTemplateTypeParmType(
@@ -1313,13 +1318,11 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (deducedType.isNull())
       return {};
 
-    if (deducedType.getAsOpaquePtr()
-          == T->getDeducedType().getAsOpaquePtr())
+    if (deducedType.getAsOpaquePtr() == T->getDeducedType().getAsOpaquePtr())
       return QualType(T, 0);
 
-    return Ctx.getAutoType(deducedType, T->getKeyword(),
-                           T->isDependentType(), /*IsPack=*/false,
-                           T->getTypeConstraintConcept(),
+    return Ctx.getAutoType(deducedType, T->getKeyword(), T->isDependentType(),
+                           /*IsPack=*/false, T->getTypeConstraintConcept(),
                            T->getTypeConstraintArguments());
   }
 
@@ -1359,8 +1362,7 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (pointeeType.isNull())
       return {};
 
-    if (pointeeType.getAsOpaquePtr()
-          == T->getPointeeType().getAsOpaquePtr())
+    if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
       return QualType(T, 0);
 
     return Ctx.getObjCObjectPointerType(pointeeType);
@@ -1371,8 +1373,7 @@ struct SimpleTransformVisitor : public TypeVisitor<Derived, QualType> {
     if (valueType.isNull())
       return {};
 
-    if (valueType.getAsOpaquePtr()
-          == T->getValueType().getAsOpaquePtr())
+    if (valueType.getAsOpaquePtr() == T->getValueType().getAsOpaquePtr())
       return QualType(T, 0);
 
     return Ctx.getAtomicType(valueType);
@@ -1409,7 +1410,7 @@ struct SubstObjCTypeArgsVisitor
       protocolsVec.append(OTPTy->qual_begin(), OTPTy->qual_end());
       ArrayRef<ObjCProtocolDecl *> protocolsToApply = protocolsVec;
       return Ctx.applyObjCProtocolQualifiers(
-          argType, protocolsToApply, hasError, true/*allowOnPointerType*/);
+          argType, protocolsToApply, hasError, true /*allowOnPointerType*/);
     }
 
     switch (SubstContext) {
@@ -1447,7 +1448,7 @@ struct SubstObjCTypeArgsVisitor
     // If we have a function type, update the substitution context
     // appropriately.
 
-    //Substitute result type.
+    // Substitute result type.
     QualType returnType = funcType->getReturnType().substObjCTypeArgs(
         Ctx, TypeArgs, ObjCSubstitutionContext::Result);
     if (returnType.isNull())
@@ -1721,14 +1722,14 @@ Type::getObjCSubstitutions(const DeclContext *dc) const {
   } else if (getAs<BlockPointerType>()) {
     ASTContext &ctx = dc->getParentASTContext();
     objectType = ctx.getObjCObjectType(ctx.ObjCBuiltinIdTy, {}, {})
-                   ->castAs<ObjCObjectType>();
+                     ->castAs<ObjCObjectType>();
   } else {
     objectType = getAs<ObjCObjectType>();
   }
 
   /// Extract the class from the receiver object type.
-  ObjCInterfaceDecl *curClassDecl = objectType ? objectType->getInterface()
-                                               : nullptr;
+  ObjCInterfaceDecl *curClassDecl =
+      objectType ? objectType->getInterface() : nullptr;
   if (!curClassDecl) {
     // If we don't have a context type (e.g., this is "id" or some
     // variant thereof), substitute the bounds.
@@ -1799,7 +1800,7 @@ void ObjCObjectType::computeSuperClassTypeSlow() const {
   ObjCTypeParamList *superClassTypeParams = superClassDecl->getTypeParamList();
   if (!superClassTypeParams) {
     CachedSuperClassType.setPointerAndInt(
-      superClassType->castAs<ObjCObjectType>(), true);
+        superClassType->castAs<ObjCObjectType>(), true);
     return;
   }
 
@@ -1814,19 +1815,18 @@ void ObjCObjectType::computeSuperClassTypeSlow() const {
   ObjCTypeParamList *typeParams = classDecl->getTypeParamList();
   if (!typeParams) {
     CachedSuperClassType.setPointerAndInt(
-      superClassType->castAs<ObjCObjectType>(), true);
+        superClassType->castAs<ObjCObjectType>(), true);
     return;
   }
 
   // If the subclass type isn't specialized, return the unspecialized
   // superclass.
   if (isUnspecialized()) {
-    QualType unspecializedSuper
-      = classDecl->getASTContext().getObjCInterfaceType(
-          superClassObjTy->getInterface());
+    QualType unspecializedSuper =
+        classDecl->getASTContext().getObjCInterfaceType(
+            superClassObjTy->getInterface());
     CachedSuperClassType.setPointerAndInt(
-      unspecializedSuper->castAs<ObjCObjectType>(),
-      true);
+        unspecializedSuper->castAs<ObjCObjectType>(), true);
     return;
   }
 
@@ -1834,16 +1834,18 @@ void ObjCObjectType::computeSuperClassTypeSlow() const {
   ArrayRef<QualType> typeArgs = getTypeArgs();
   assert(typeArgs.size() == typeParams->size());
   CachedSuperClassType.setPointerAndInt(
-    superClassType.substObjCTypeArgs(classDecl->getASTContext(), typeArgs,
-                                     ObjCSubstitutionContext::Superclass)
-      ->castAs<ObjCObjectType>(),
-    true);
+      superClassType
+          .substObjCTypeArgs(classDecl->getASTContext(), typeArgs,
+                             ObjCSubstitutionContext::Superclass)
+          ->castAs<ObjCObjectType>(),
+      true);
 }
 
 const ObjCInterfaceType *ObjCObjectPointerType::getInterfaceType() const {
   if (auto interfaceDecl = getObjectType()->getInterface()) {
-    return interfaceDecl->getASTContext().getObjCInterfaceType(interfaceDecl)
-             ->castAs<ObjCInterfaceType>();
+    return interfaceDecl->getASTContext()
+        .getObjCInterfaceType(interfaceDecl)
+        ->castAs<ObjCInterfaceType>();
   }
 
   return nullptr;
@@ -1960,103 +1962,100 @@ bool Type::hasAttr(attr::Kind AK) const {
 
 namespace {
 
-  class GetContainedDeducedTypeVisitor :
-    public TypeVisitor<GetContainedDeducedTypeVisitor, Type*> {
-    bool Syntactic;
+class GetContainedDeducedTypeVisitor
+    : public TypeVisitor<GetContainedDeducedTypeVisitor, Type *> {
+  bool Syntactic;
 
-  public:
-    GetContainedDeducedTypeVisitor(bool Syntactic = false)
-        : Syntactic(Syntactic) {}
+public:
+  GetContainedDeducedTypeVisitor(bool Syntactic = false)
+      : Syntactic(Syntactic) {}
 
-    using TypeVisitor<GetContainedDeducedTypeVisitor, Type*>::Visit;
+  using TypeVisitor<GetContainedDeducedTypeVisitor, Type *>::Visit;
 
-    Type *Visit(QualType T) {
-      if (T.isNull())
-        return nullptr;
-      return Visit(T.getTypePtr());
-    }
+  Type *Visit(QualType T) {
+    if (T.isNull())
+      return nullptr;
+    return Visit(T.getTypePtr());
+  }
 
-    // The deduced type itself.
-    Type *VisitDeducedType(const DeducedType *AT) {
-      return const_cast<DeducedType*>(AT);
-    }
+  // The deduced type itself.
+  Type *VisitDeducedType(const DeducedType *AT) {
+    return const_cast<DeducedType *>(AT);
+  }
 
-    // Only these types can contain the desired 'auto' type.
-    Type *VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
-      return Visit(T->getReplacementType());
-    }
+  // Only these types can contain the desired 'auto' type.
+  Type *VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+    return Visit(T->getReplacementType());
+  }
 
-    Type *VisitElaboratedType(const ElaboratedType *T) {
-      return Visit(T->getNamedType());
-    }
+  Type *VisitElaboratedType(const ElaboratedType *T) {
+    return Visit(T->getNamedType());
+  }
 
-    Type *VisitPointerType(const PointerType *T) {
-      return Visit(T->getPointeeType());
-    }
+  Type *VisitPointerType(const PointerType *T) {
+    return Visit(T->getPointeeType());
+  }
 
-    Type *VisitBlockPointerType(const BlockPointerType *T) {
-      return Visit(T->getPointeeType());
-    }
+  Type *VisitBlockPointerType(const BlockPointerType *T) {
+    return Visit(T->getPointeeType());
+  }
 
-    Type *VisitReferenceType(const ReferenceType *T) {
-      return Visit(T->getPointeeTypeAsWritten());
-    }
+  Type *VisitReferenceType(const ReferenceType *T) {
+    return Visit(T->getPointeeTypeAsWritten());
+  }
 
-    Type *VisitMemberPointerType(const MemberPointerType *T) {
-      return Visit(T->getPointeeType());
-    }
+  Type *VisitMemberPointerType(const MemberPointerType *T) {
+    return Visit(T->getPointeeType());
+  }
 
-    Type *VisitArrayType(const ArrayType *T) {
-      return Visit(T->getElementType());
-    }
+  Type *VisitArrayType(const ArrayType *T) {
+    return Visit(T->getElementType());
+  }
 
-    Type *VisitDependentSizedExtVectorType(
-      const DependentSizedExtVectorType *T) {
-      return Visit(T->getElementType());
-    }
+  Type *VisitDependentSizedExtVectorType(const DependentSizedExtVectorType *T) {
+    return Visit(T->getElementType());
+  }
 
-    Type *VisitVectorType(const VectorType *T) {
-      return Visit(T->getElementType());
-    }
+  Type *VisitVectorType(const VectorType *T) {
+    return Visit(T->getElementType());
+  }
 
-    Type *VisitDependentSizedMatrixType(const DependentSizedMatrixType *T) {
-      return Visit(T->getElementType());
-    }
+  Type *VisitDependentSizedMatrixType(const DependentSizedMatrixType *T) {
+    return Visit(T->getElementType());
+  }
 
-    Type *VisitConstantMatrixType(const ConstantMatrixType *T) {
-      return Visit(T->getElementType());
-    }
+  Type *VisitConstantMatrixType(const ConstantMatrixType *T) {
+    return Visit(T->getElementType());
+  }
 
-    Type *VisitFunctionProtoType(const FunctionProtoType *T) {
-      if (Syntactic && T->hasTrailingReturn())
-        return const_cast<FunctionProtoType*>(T);
-      return VisitFunctionType(T);
-    }
+  Type *VisitFunctionProtoType(const FunctionProtoType *T) {
+    if (Syntactic && T->hasTrailingReturn())
+      return const_cast<FunctionProtoType *>(T);
+    return VisitFunctionType(T);
+  }
 
-    Type *VisitFunctionType(const FunctionType *T) {
-      return Visit(T->getReturnType());
-    }
+  Type *VisitFunctionType(const FunctionType *T) {
+    return Visit(T->getReturnType());
+  }
 
-    Type *VisitParenType(const ParenType *T) {
-      return Visit(T->getInnerType());
-    }
+  Type *VisitParenType(const ParenType *T) { return Visit(T->getInnerType()); }
 
-    Type *VisitAttributedType(const AttributedType *T) {
-      return Visit(T->getModifiedType());
-    }
+  Type *VisitAttributedType(const AttributedType *T) {
+    return Visit(T->getModifiedType());
+  }
 
-    Type *VisitMacroQualifiedType(const MacroQualifiedType *T) {
-      return Visit(T->getUnderlyingType());
-    }
+  Type *VisitMacroQualifiedType(const MacroQualifiedType *T) {
+    return Visit(T->getUnderlyingType());
+  }
 
-    Type *VisitAdjustedType(const AdjustedType *T) {
-      return Visit(T->getOriginalType());
-    }
+  Type *VisitAdjustedType(const AdjustedType *T) {
+    return Visit(T->getOriginalType());
+  }
 
-    Type *VisitPackExpansionType(const PackExpansionType *T) {
-      return Visit(T->getPattern());
-    }
-  };
+  Type *VisitPackExpansionType(const PackExpansionType *T) {
+    return Visit(T->getPattern());
+  }
+};
 
 } // namespace
 
@@ -2176,9 +2175,11 @@ bool Type::isChar32Type() const {
 /// types.
 bool Type::isAnyCharacterType() const {
   const auto *BT = dyn_cast<BuiltinType>(CanonicalType);
-  if (!BT) return false;
+  if (!BT)
+    return false;
   switch (BT->getKind()) {
-  default: return false;
+  default:
+    return false;
   case BuiltinType::Char_U:
   case BuiltinType::UChar:
   case BuiltinType::WChar_U:
@@ -2269,7 +2270,7 @@ bool Type::isUnsignedIntegerType() const {
 bool Type::isUnsignedIntegerOrEnumerationType() const {
   if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
     return BT->getKind() >= BuiltinType::Bool &&
-    BT->getKind() <= BuiltinType::UInt128;
+           BT->getKind() <= BuiltinType::UInt128;
   }
 
   if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
@@ -2326,7 +2327,7 @@ bool Type::isRealType() const {
     return BT->getKind() >= BuiltinType::Bool &&
            BT->getKind() <= BuiltinType::Ibm128;
   if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
-      return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+    return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
   return isBitIntType();
 }
 
@@ -2362,11 +2363,16 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
 
   const Type *T = CanonicalType.getTypePtr();
   if (const auto *BT = dyn_cast<BuiltinType>(T)) {
-    if (BT->getKind() == BuiltinType::Bool) return STK_Bool;
-    if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer;
-    if (BT->isInteger()) return STK_Integral;
-    if (BT->isFloatingPoint()) return STK_Floating;
-    if (BT->isFixedPointType()) return STK_FixedPoint;
+    if (BT->getKind() == BuiltinType::Bool)
+      return STK_Bool;
+    if (BT->getKind() == BuiltinType::NullPtr)
+      return STK_CPointer;
+    if (BT->isInteger())
+      return STK_Integral;
+    if (BT->isFloatingPoint())
+      return STK_Floating;
+    if (BT->isFixedPointType())
+      return STK_FixedPoint;
     llvm_unreachable("unknown scalar builtin type");
   } else if (isa<PointerType>(T)) {
     return STK_CPointer;
@@ -2428,7 +2434,8 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
     *Def = nullptr;
 
   switch (CanonicalType->getTypeClass()) {
-  default: return false;
+  default:
+    return false;
   case Builtin:
     // Void is the only incomplete builtin type.  Per C99 6.2.5p19, it can 
never
     // be completed.
@@ -2461,8 +2468,9 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
     // (C++ [dcl.array]p1).
     // We don't handle dependent-sized arrays (dependent types are never treated
     // as incomplete).
-    return cast<ArrayType>(CanonicalType)->getElementType()
-             ->isIncompleteType(Def);
+    return cast<ArrayType>(CanonicalType)
+        ->getElementType()
+        ->isIncompleteType(Def);
   case IncompleteArray:
     // An array of unknown size is an incomplete type (C99 6.2.5p22).
     return true;
@@ -2487,12 +2495,13 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
     return true;
   }
   case ObjCObject:
-    return cast<ObjCObjectType>(CanonicalType)->getBaseType()
-             ->isIncompleteType(Def);
+    return cast<ObjCObjectType>(CanonicalType)
+        ->getBaseType()
+        ->isIncompleteType(Def);
   case ObjCInterface: {
     // ObjC interfaces are incomplete if they are @class, not @interface.
-    ObjCInterfaceDecl *Interface
-      = cast<ObjCInterfaceType>(CanonicalType)->getDecl();
+    ObjCInterfaceDecl *Interface =
+        cast<ObjCInterfaceType>(CanonicalType)->getDecl();
     if (Def)
       *Def = Interface;
     return !Interface->hasDefinition();
@@ -2707,7 +2716,8 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
   QualType CanonicalType = getTypePtr()->CanonicalType;
   switch (CanonicalType->getTypeClass()) {
     // Everything not explicitly mentioned is not POD.
-  default: return false;
+  default:
+    return false;
   case Type::VariableArray:
   case Type::ConstantArray:
     // IncompleteArray is handled above.
@@ -2915,7 +2925,8 @@ bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const {
          getObjCLifetime() != Qualifiers::OCL_Weak;
 }
 
-bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion(const RecordDecl *RD) {
+bool QualType::hasNonTrivialToPrimitiveDefaultInitializeCUnion(
+    const RecordDecl *RD) {
   return RD->hasNonTrivialToPrimitiveDefaultInitializeCUnion();
 }
 
@@ -3079,7 +3090,8 @@ bool Type::isStandardLayoutType() const {
     return false;
 
   // As an extension, Clang treats vector types as Scalar types.
-  if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
+  if (BaseTy->isScalarType() || BaseTy->isVectorType())
+    return true;
   if (const auto *RT = BaseTy->getAs<RecordType>()) {
     if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
       if (!ClassDecl->isStandardLayout())
@@ -3121,17 +3133,20 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
     return false;
 
   // As an extension, Clang treats vector types as Scalar types.
-  if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
+  if (BaseTy->isScalarType() || BaseTy->isVectorType())
+    return true;
   if (const auto *RT = BaseTy->getAs<RecordType>()) {
     if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
       // C++11 [class]p10:
       //   A POD struct is a non-union class that is both a trivial class [...]
-      if (!ClassDecl->isTrivial()) return false;
+      if (!ClassDecl->isTrivial())
+        return false;
 
       // C++11 [class]p10:
       //   A POD struct is a non-union class that is both a trivial class and
       //   a standard-layout class [...]
-      if (!ClassDecl->isStandardLayout()) return false;
+      if (!ClassDecl->isStandardLayout())
+        return false;
 
       // C++11 [class]p10:
       //   A POD struct is a non-union class that is both a trivial class and
@@ -3221,9 +3236,8 @@ TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
   }
 }
 
-TagTypeKind
-TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
-  switch(TypeSpec) {
+TagTypeKind TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
+  switch (TypeSpec) {
   case TST_class:
     return TagTypeKind::Class;
   case TST_struct:
@@ -3276,8 +3290,7 @@ TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
   llvm_unreachable("Unknown elaborated type keyword.");
 }
 
-bool
-TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
+bool TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
   switch (Keyword) {
   case ElaboratedTypeKeyword::None:
   case ElaboratedTypeKeyword::Typename:
@@ -3358,7 +3371,9 @@ bool Type::isElaboratedTypeSpecifier() const {
 const char *Type::getTypeClassName() const {
   switch (TypeBits.TC) {
 #define ABSTRACT_TYPE(Derived, Base)
-#define TYPE(Derived, Base) case Derived: return #Derived;
+#define TYPE(Derived, Base)                                                    \
+  case Derived:                                                                \
+    return #Derived;
 #include "clang/AST/TypeNodes.inc"
   }
 
@@ -3496,8 +3511,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
     return "Class";
   case ObjCSel:
     return "SEL";
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
-  case Id: \
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
+  case Id:                                                                     \
     return "__" #Access " " #ImgType "_t";
 #include "clang/Basic/OpenCLImageTypes.def"
   case OCLSampler:
@@ -3518,16 +3533,16 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
     return "<OpenMP array shaping type>";
   case OMPIterator:
     return "<OpenMP iterator type>";
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
-  case Id: \
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
+  case Id:                                                                     \
     return #ExtType;
 #include "clang/Basic/OpenCLExtensionTypes.def"
 #define SVE_TYPE(Name, Id, SingletonId)                                        \
   case Id:                                                                     \
     return #Name;
 #include "clang/Basic/AArch64SVEACLETypes.def"
-#define PPC_VECTOR_TYPE(Name, Id, Size) \
-  case Id: \
+#define PPC_VECTOR_TYPE(Name, Id, Size)                                        \
+  case Id:                                                                     \
     return #Name;
 #include "clang/Basic/PPCTypes.def"
 #define RVV_TYPE(Name, Id, SingletonId)                                        \
@@ -3576,29 +3591,52 @@ QualType QualType::getNonLValueExprType(const ASTContext &Context) const {
 
 StringRef FunctionType::getNameForCallConv(CallingConv CC) {
   switch (CC) {
-  case CC_C: return "cdecl";
-  case CC_X86StdCall: return "stdcall";
-  case CC_X86FastCall: return "fastcall";
-  case CC_X86ThisCall: return "thiscall";
-  case CC_X86Pascal: return "pascal";
-  case CC_X86VectorCall: return "vectorcall";
-  case CC_Win64: return "ms_abi";
-  case CC_X86_64SysV: return "sysv_abi";
-  case CC_X86RegCall : return "regcall";
-  case CC_AAPCS: return "aapcs";
-  case CC_AAPCS_VFP: return "aapcs-vfp";
-  case CC_AArch64VectorCall: return "aarch64_vector_pcs";
-  case CC_AArch64SVEPCS: return "aarch64_sve_pcs";
-  case CC_AMDGPUKernelCall: return "amdgpu_kernel";
-  case CC_IntelOclBicc: return "intel_ocl_bicc";
-  case CC_SpirFunction: return "spir_function";
-  case CC_OpenCLKernel: return "opencl_kernel";
-  case CC_Swift: return "swiftcall";
-  case CC_SwiftAsync: return "swiftasynccall";
-  case CC_PreserveMost: return "preserve_most";
-  case CC_PreserveAll: return "preserve_all";
-  case CC_M68kRTD: return "m68k_rtd";
-  case CC_PreserveNone: return "preserve_none";
+  case CC_C:
+    return "cdecl";
+  case CC_X86StdCall:
+    return "stdcall";
+  case CC_X86FastCall:
+    return "fastcall";
+  case CC_X86ThisCall:
+    return "thiscall";
+  case CC_X86Pascal:
+    return "pascal";
+  case CC_X86VectorCall:
+    return "vectorcall";
+  case CC_Win64:
+    return "ms_abi";
+  case CC_X86_64SysV:
+    return "sysv_abi";
+  case CC_X86RegCall:
+    return "regcall";
+  case CC_AAPCS:
+    return "aapcs";
+  case CC_AAPCS_VFP:
+    return "aapcs-vfp";
+  case CC_AArch64VectorCall:
+    return "aarch64_vector_pcs";
+  case CC_AArch64SVEPCS:
+    return "aarch64_sve_pcs";
+  case CC_AMDGPUKernelCall:
+    return "amdgpu_kernel";
+  case CC_IntelOclBicc:
+    return "intel_ocl_bicc";
+  case CC_SpirFunction:
+    return "spir_function";
+  case CC_OpenCLKernel:
+    return "opencl_kernel";
+  case CC_Swift:
+    return "swiftcall";
+  case CC_SwiftAsync:
+    return "swiftasynccall";
+  case CC_PreserveMost:
+    return "preserve_most";
+  case CC_PreserveAll:
+    return "preserve_all";
+  case CC_M68kRTD:
+    return "m68k_rtd";
+  case CC_PreserveNone:
+    return "preserve_none";
     // clang-format off
   case CC_RISCVVectorCall: return "riscv_vector_cc";
 #define CC_VLS_CASE(ABI_VLEN) \
@@ -3882,12 +3920,10 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
   // This method is relatively performance sensitive, so as a performance
   // shortcut, use one AddInteger call instead of four for the next four
   // fields.
-  assert(!(unsigned(epi.Variadic) & ~1) &&
-         !(unsigned(epi.RefQualifier) & ~3) &&
+  assert(!(unsigned(epi.Variadic) & ~1) && !(unsigned(epi.RefQualifier) & ~3) &&
          !(unsigned(epi.ExceptionSpec.Type) & ~15) &&
          "Values larger than expected.");
-  ID.AddInteger(unsigned(epi.Variadic) +
-                (epi.RefQualifier << 1) +
+  ID.AddInteger(unsigned(epi.Variadic) + (epi.RefQualifier << 1) +
                 (epi.ExceptionSpec.Type << 3));
   ID.Add(epi.TypeQuals);
   if (epi.ExceptionSpec.Type == EST_Dynamic) {
@@ -4047,9 +4083,7 @@ TypeOfExprType::TypeOfExprType(const ASTContext &Context, Expr *E,
   TypeOfBits.Kind = static_cast<unsigned>(Kind);
 }
 
-bool TypeOfExprType::isSugared() const {
-  return !TOExpr->isTypeDependent();
-}
+bool TypeOfExprType::isSugared() const { return !TOExpr->isTypeDependent(); }
 
 QualType TypeOfExprType::desugar() const {
   if (isSugared()) {
@@ -4194,16 +4228,12 @@ static TagDecl *getInterestingTagDecl(TagDecl *decl) {
   return decl;
 }
 
-TagDecl *TagType::getDecl() const {
-  return getInterestingTagDecl(decl);
-}
+TagDecl *TagType::getDecl() const { return getInterestingTagDecl(decl); }
 
-bool TagType::isBeingDefined() const {
-  return getDecl()->isBeingDefined();
-}
+bool TagType::isBeingDefined() const { return getDecl()->isBeingDefined(); }
 
 bool RecordType::hasConstFields() const {
-  std::vector<const RecordType*> RecordTypeList;
+  std::vector<const RecordType *> RecordTypeList;
   RecordTypeList.push_back(this);
   unsigned NextToCheckIndex = 0;
 
@@ -4264,7 +4294,8 @@ bool AttributedType::isQualifier() const {
 bool AttributedType::isMSTypeSpec() const {
   // FIXME: Generate this with TableGen?
   switch (getAttrKind()) {
-  default: return false;
+  default:
+    return false;
   case attr::Ptr32:
   case attr::Ptr64:
   case attr::SPtr:
@@ -4281,7 +4312,8 @@ bool AttributedType::isWebAssemblyFuncrefSpec() const {
 bool AttributedType::isCallingConv() const {
   // FIXME: Generate this with TableGen.
   switch (getAttrKind()) {
-  default: return false;
+  default:
+    return false;
   case attr::Pcs:
   case attr::CDecl:
   case attr::FastCall:
@@ -4413,7 +4445,8 @@ void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
 }
 
 bool TemplateSpecializationType::anyDependentTemplateArguments(
-    const TemplateArgumentListInfo &Args, ArrayRef<TemplateArgument> Converted) {
+    const TemplateArgumentListInfo &Args,
+    ArrayRef<TemplateArgument> Converted) {
   return anyDependentTemplateArguments(Args.arguments(), Converted);
 }
 
@@ -4426,7 +4459,7 @@ bool TemplateSpecializationType::anyDependentTemplateArguments(
 }
 
 bool TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
-      ArrayRef<TemplateArgumentLoc> Args) {
+    ArrayRef<TemplateArgumentLoc> Args) {
   for (const TemplateArgumentLoc &ArgLoc : Args) {
     if (ArgLoc.getArgument().isInstantiationDependent())
       return true;
@@ -4509,24 +4542,23 @@ void TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
     Arg.Profile(ID, Context);
 }
 
-QualType
-QualifierCollector::apply(const ASTContext &Context, QualType QT) const {
+QualType QualifierCollector::apply(const ASTContext &Context,
+                                   QualType QT) const {
   if (!hasNonFastQualifiers())
     return QT.withFastQualifiers(getFastQualifiers());
 
   return Context.getQualifiedType(QT, *this);
 }
 
-QualType
-QualifierCollector::apply(const ASTContext &Context, const Type *T) const {
+QualType QualifierCollector::apply(const ASTContext &Context,
+                                   const Type *T) const {
   if (!hasNonFastQualifiers())
     return QualType(T, getFastQualifiers());
 
   return Context.getQualifiedType(T, *this);
 }
 
-void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID,
-                                 QualType BaseType,
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID, QualType BaseType,
                                  ArrayRef<QualType> typeArgs,
                                  ArrayRef<ObjCProtocolDecl *> protocols,
                                  bool isKindOf) {
@@ -4593,9 +4625,7 @@ namespace clang {
 /// leakage.
 template <class Private> class TypePropertyCache {
 public:
-  static CachedProperties get(QualType T) {
-    return get(T.getTypePtr());
-  }
+  static CachedProperties get(QualType T) { return get(T.getTypePtr()); }
 
   static CachedProperties get(const Type *T) {
     ensure(T);
@@ -4605,7 +4635,8 @@ template <class Private> class TypePropertyCache {
 
   static void ensure(const Type *T) {
     // If the cache is valid, we're okay.
-    if (T->TypeBits.isCacheValid()) return;
+    if (T->TypeBits.isCacheValid())
+      return;
 
     // If this type is non-canonical, ask its canonical type for the
     // relevant information.
@@ -4641,14 +4672,14 @@ using Cache = TypePropertyCache<Private>;
 
 static CachedProperties computeCachedProperties(const Type *T) {
   switch (T->getTypeClass()) {
-#define TYPE(Class,Base)
-#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
+#define TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
 #include "clang/AST/TypeNodes.inc"
     llvm_unreachable("didn't expect a non-canonical type here");
 
-#define TYPE(Class,Base)
-#define DEPENDENT_TYPE(Class,Base) case Type::Class:
-#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
+#define TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
 #include "clang/AST/TypeNodes.inc"
     // Treat instantiation-dependent types as external.
     assert(T->isInstantiationDependentType());
@@ -4676,9 +4707,8 @@ static CachedProperties computeCachedProperties(const Type *T) {
     //       for linkage purposes (7.1.3)) and the name has linkage; or
     //     -  it is a specialization of a class template (14); or
     Linkage L = Tag->getLinkageInternal();
-    bool IsLocalOrUnnamed =
-      Tag->getDeclContext()->isFunctionOrMethod() ||
-      !Tag->hasNameForLinkage();
+    bool IsLocalOrUnnamed = Tag->getDeclContext()->isFunctionOrMethod() ||
+                            !Tag->hasNameForLinkage();
     return CachedProperties(L, IsLocalOrUnnamed);
   }
 
@@ -4757,14 +4787,14 @@ bool Type::hasUnnamedOrLocalType() const {
 
 LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
   switch (T->getTypeClass()) {
-#define TYPE(Class,Base)
-#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
+#define TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
 #include "clang/AST/TypeNodes.inc"
     llvm_unreachable("didn't expect a non-canonical type here");
 
-#define TYPE(Class,Base)
-#define DEPENDENT_TYPE(Class,Base) case Type::Class:
-#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
+#define TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
 #include "clang/AST/TypeNodes.inc"
     // Treat instantiation-dependent types as external.
     assert(T->isInstantiationDependentType());
@@ -4882,9 +4912,9 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
   QualType type = getCanonicalTypeInternal();
 
   switch (type->getTypeClass()) {
-  // We'll only see canonical types here.
-#define NON_CANONICAL_TYPE(Class, Parent)       \
-  case Type::Class:                             \
+    // We'll only see canonical types here.
+#define NON_CANONICAL_TYPE(Class, Parent)                                      \
+  case Type::Class:                                                            \
     llvm_unreachable("non-canonical type");
 #define TYPE(Class, Parent)
 #include "clang/AST/TypeNodes.inc"
@@ -4948,22 +4978,19 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
     case BuiltinType::ObjCId:
     case BuiltinType::ObjCClass:
     case BuiltinType::ObjCSel:
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
-    case BuiltinType::Id:
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
+  case BuiltinType::Id:
 #include "clang/Basic/OpenCLImageTypes.def"
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
-    case BuiltinType::Id:
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id:
 #include "clang/Basic/OpenCLExtensionTypes.def"
     case BuiltinType::OCLSampler:
     case BuiltinType::OCLEvent:
     case BuiltinType::OCLClkEvent:
     case BuiltinType::OCLQueue:
     case BuiltinType::OCLReserveID:
-#define SVE_TYPE(Name, Id, SingletonId) \
-    case BuiltinType::Id:
+#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
 #include "clang/Basic/AArch64SVEACLETypes.def"
-#define PPC_VECTOR_TYPE(Name, Id, Size) \
-    case BuiltinType::Id:
+#define PPC_VECTOR_TYPE(Name, Id, Size) case BuiltinType::Id:
 #include "clang/Basic/PPCTypes.def"
 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
 #include "clang/Basic/RISCVVTypes.def"
@@ -5131,8 +5158,7 @@ bool Type::isObjCIndependentClassType() const {
 }
 
 bool Type::isObjCRetainableType() const {
-  return isObjCObjectPointerType() ||
-         isBlockPointerType() ||
+  return isObjCObjectPointerType() || isBlockPointerType() ||
          isObjCNSObjectType();
 }
 
@@ -5188,7 +5214,8 @@ bool Type::isCUDADeviceBuiltinTextureType() const {
 }
 
 bool Type::hasSizedVLAType() const {
-  if (!isVariablyModifiedType()) return false;
+  if (!isVariablyModifiedType())
+    return false;
 
   if (const auto *ptr = getAs<PointerType>())
     return ptr->getPointeeType()->hasSizedVLAType();
@@ -5245,8 +5272,7 @@ QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
     return DK_objc_weak_lifetime;
   }
 
-  if (const auto *RT =
-          type->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+  if (const auto *RT = type->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
     const RecordDecl *RD = RT->getDecl();
     if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
       /// Check if this is a C++ object with a non-trivial destructor.
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index c7fbbbc6fd40d..83b0e8e965770 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -50,31 +50,54 @@ using namespace CodeGen;
 
 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
   switch (CC) {
-  default: return llvm::CallingConv::C;
-  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
-  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
-  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
-  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
-  case CC_Win64: return llvm::CallingConv::Win64;
-  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
-  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
-  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
-  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
+  default:
+    return llvm::CallingConv::C;
+  case CC_X86StdCall:
+    return llvm::CallingConv::X86_StdCall;
+  case CC_X86FastCall:
+    return llvm::CallingConv::X86_FastCall;
+  case CC_X86RegCall:
+    return llvm::CallingConv::X86_RegCall;
+  case CC_X86ThisCall:
+    return llvm::CallingConv::X86_ThisCall;
+  case CC_Win64:
+    return llvm::CallingConv::Win64;
+  case CC_X86_64SysV:
+    return llvm::CallingConv::X86_64_SysV;
+  case CC_AAPCS:
+    return llvm::CallingConv::ARM_AAPCS;
+  case CC_AAPCS_VFP:
+    return llvm::CallingConv::ARM_AAPCS_VFP;
+  case CC_IntelOclBicc:
+    return llvm::CallingConv::Intel_OCL_BI;
   // TODO: Add support for __pascal to LLVM.
-  case CC_X86Pascal: return llvm::CallingConv::C;
+  case CC_X86Pascal:
+    return llvm::CallingConv::C;
   // TODO: Add support for __vectorcall to LLVM.
-  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
-  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
-  case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
-  case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
-  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
-  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
-  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
-  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
-  case CC_Swift: return llvm::CallingConv::Swift;
-  case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
-  case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
-  case CC_PreserveNone: return llvm::CallingConv::PreserveNone;
+  case CC_X86VectorCall:
+    return llvm::CallingConv::X86_VectorCall;
+  case CC_AArch64VectorCall:
+    return llvm::CallingConv::AArch64_VectorCall;
+  case CC_AArch64SVEPCS:
+    return llvm::CallingConv::AArch64_SVE_VectorCall;
+  case CC_AMDGPUKernelCall:
+    return llvm::CallingConv::AMDGPU_KERNEL;
+  case CC_SpirFunction:
+    return llvm::CallingConv::SPIR_FUNC;
+  case CC_OpenCLKernel:
+    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
+  case CC_PreserveMost:
+    return llvm::CallingConv::PreserveMost;
+  case CC_PreserveAll:
+    return llvm::CallingConv::PreserveAll;
+  case CC_Swift:
+    return llvm::CallingConv::Swift;
+  case CC_SwiftAsync:
+    return llvm::CallingConv::SwiftTail;
+  case CC_M68kRTD:
+    return llvm::CallingConv::M68k_RTD;
+  case CC_PreserveNone:
+    return llvm::CallingConv::PreserveNone;
     // clang-format off
   case CC_RISCVVectorCall: return llvm::CallingConv::RISCV_VectorCall;
     // clang-format on
@@ -110,14 +133,16 @@ CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
     RecTy = Context.VoidTy;
 
   if (MD)
-    RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
+    RecTy = Context.getAddrSpaceQualType(
+        RecTy, MD->getMethodQualifiers().getAddressSpace());
   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
 }
 
 /// Returns the canonical formal type of the given C++ method.
 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
-  return MD->getType()->getCanonicalTypeUnqualified()
-           .getAs<FunctionProtoType>();
+  return MD->getType()
+      ->getCanonicalTypeUnqualified()
+      .getAs<FunctionProtoType>();
 }
 
 /// Returns the "extra-canonicalized" return type, which discards
@@ -140,10 +165,8 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
 }
 
 static void addExtParameterInfosForCall(
-         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
-                                        const FunctionProtoType *proto,
-                                        unsigned prefixArgs,
-                                        unsigned totalArgs) {
+    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
+    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
   assert(proto->hasExtParameterInfos());
   assert(paramInfos.size() <= prefixArgs);
   assert(proto->getNumParams() + prefixArgs <= totalArgs);
@@ -169,10 +192,10 @@ static void addExtParameterInfosForCall(
 
 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
-static void appendParameterTypes(const CodeGenTypes &CGT,
-                                 SmallVectorImpl<CanQualType> &prefix,
-              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
-                                 CanQual<FunctionProtoType> FPT) {
+static void appendParameterTypes(
+    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
+    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
+    CanQual<FunctionProtoType> FPT) {
   // Fast path: don't touch param info if we don't need to.
   if (!FPT->hasExtParameterInfos()) {
     assert(paramInfos.empty() &&
@@ -459,13 +482,9 @@ getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs,
 /// args.
 /// PassProtoArgs indicates whether `args` has args for the parameters in the
 /// given CXXConstructorDecl.
-const CGFunctionInfo &
-CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
-                                        const CXXConstructorDecl *D,
-                                        CXXCtorType CtorKind,
-                                        unsigned ExtraPrefixArgs,
-                                        unsigned ExtraSuffixArgs,
-                                        bool PassProtoArgs) {
+const CGFunctionInfo &CodeGenTypes::arrangeCXXConstructorCall(
+    const CallArgList &args, const CXXConstructorDecl *D, CXXCtorType CtorKind,
+    unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs) {
   CanQualTypeList ArgTypes;
   for (const auto &Arg : args)
     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
@@ -570,7 +589,7 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
     einfo = einfo.withProducesResult(true);
 
   RequiredArgs required =
-    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
+      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
 
   return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()),
                                  FnInfoOpts::None, argTys, einfo, extParamInfos,
@@ -587,8 +606,7 @@ CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                  argTypes, einfo, {}, RequiredArgs::All);
 }
 
-const CGFunctionInfo &
-CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
+const CGFunctionInfo &CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
   // FIXME: Do we need to handle ObjCMethodDecl?
   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
       isa<CXXDestructorDecl>(GD.getDecl()))
@@ -634,12 +652,9 @@ CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
 /// Arrange a call as unto a free function, except possibly with an
 /// additional number of formal parameters considered required.
 static const CGFunctionInfo &
-arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
-                            CodeGenModule &CGM,
-                            const CallArgList &args,
-                            const FunctionType *fnType,
-                            unsigned numExtraRequiredArgs,
-                            bool chainCall) {
+arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM,
+                            const CallArgList &args, const FunctionType *fnType,
+                            unsigned numExtraRequiredArgs, bool chainCall) {
   assert(args.size() >= numExtraRequiredArgs);
 
   ExtParameterInfoList paramInfos;
@@ -657,13 +672,12 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                   args.size());
 
-  // If we don't have a prototype at all, but we're supposed to
-  // explicitly use the variadic convention for unprototyped calls,
-  // treat all of the arguments as required but preserve the nominal
-  // possibility of variadics.
-  } else if (CGM.getTargetCodeGenInfo()
-                .isNoProtoCallVariadic(args,
-                                       cast<FunctionNoProtoType>(fnType))) {
+    // If we don't have a prototype at all, but we're supposed to
+    // explicitly use the variadic convention for unprototyped calls,
+    // treat all of the arguments as required but preserve the nominal
+    // possibility of variadics.
+  } else if (CGM.getTargetCodeGenInfo().isNoProtoCallVariadic(
+                 args, cast<FunctionNoProtoType>(fnType))) {
     required = RequiredArgs(args.size());
   }
 
@@ -680,10 +694,8 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
 /// type using the given arguments.  The arguments are necessary
 /// because the function might be unprototyped, in which case it's
 /// target-dependent in crazy ways.
-const CGFunctionInfo &
-CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
-                                      const FunctionType *fnType,
-                                      bool chainCall) {
+const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionCall(
+    const CallArgList &args, const FunctionType *fnType, bool chainCall) {
   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                      chainCall ? 1 : 0, chainCall);
 }
@@ -731,9 +743,8 @@ CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                  RequiredArgs::All);
 }
 
-const CGFunctionInfo &
-CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
-                                              ArrayRef<CanQualType> argTypes) {
+const CGFunctionInfo &CodeGenTypes::arrangeBuiltinFunctionDeclaration(
+    CanQualType resultType, ArrayRef<CanQualType> argTypes) {
   return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::None, argTypes,
                                  FunctionType::ExtInfo(), {},
                                  RequiredArgs::All);
@@ -754,11 +765,9 @@ CodeGenTypes::arrangeSYCLKernelCallerDeclaration(QualType resultType,
 ///
 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
 /// does not count `this`.
-const CGFunctionInfo &
-CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
-                                   const FunctionProtoType *proto,
-                                   RequiredArgs required,
-                                   unsigned numPrefixArgs) {
+const CGFunctionInfo &CodeGenTypes::arrangeCXXMethodCall(
+    const CallArgList &args, const FunctionProtoType *proto,
+    RequiredArgs required, unsigned numPrefixArgs) {
   assert(numPrefixArgs + 1 <= args.size() &&
          "Emitting a call with less args than the required prefix?");
   // Add one to account for `this`. It's a bit awkward here, but we don't count
@@ -780,9 +789,8 @@ const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
                                  RequiredArgs::All);
 }
 
-const CGFunctionInfo &
-CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
-                          const CallArgList &args) {
+const CGFunctionInfo &CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
+                                                const CallArgList &args) {
   assert(signature.arg_size() <= args.size());
   if (signature.arg_size() == args.size())
     return signature;
@@ -813,7 +821,7 @@ namespace clang {
 namespace CodeGen {
 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
 }
-}
+} // namespace clang
 
 /// Arrange the argument and result information for an abstract value
 /// of a given function type.  This is the method which all of the
@@ -875,7 +883,8 @@ const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
       I.info.setCoerceToType(ConvertType(I.type));
 
-  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
+  bool erased = FunctionsBeingProcessed.erase(FI);
+  (void)erased;
   assert(erased && "Not in set?");
 
   return *FI;
@@ -892,11 +901,10 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod,
   assert(!required.allowsOptionalArgs() ||
          required.getNumRequiredArgs() <= argTypes.size());
 
-  void *buffer =
-    operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
-                                  argTypes.size() + 1, paramInfos.size()));
+  void *buffer = operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
+      argTypes.size() + 1, paramInfos.size()));
 
-  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
+  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
   FI->CallingConvention = llvmCC;
   FI->EffectiveCallingConvention = llvmCC;
   FI->ASTCallingConvention = info.getCC();
@@ -985,11 +993,9 @@ struct ComplexExpansion : TypeExpansion {
 
 struct NoExpansion : TypeExpansion {
   NoExpansion() : TypeExpansion(TEK_None) {}
-  static bool classof(const TypeExpansion *TE) {
-    return TE->Kind == TEK_None;
-  }
+  static bool classof(const TypeExpansion *TE) { return TE->Kind == TEK_None; }
 };
-}  // namespace
+} // namespace
 
 static std::unique_ptr<TypeExpansion>
 getTypeExpansion(QualType Ty, const ASTContext &Context) {
@@ -1038,7 +1044,7 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
       }
     }
     return std::make_unique<RecordExpansion>(std::move(Bases),
-                                              std::move(Fields));
+                                             std::move(Fields));
   }
   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
     return std::make_unique<ComplexExpansion>(CT->getElementType());
@@ -1065,9 +1071,8 @@ static int getExpansionSize(QualType Ty, const ASTContext &Context) {
   return 1;
 }
 
-void
-CodeGenTypes::getExpandedTypes(QualType Ty,
-                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
+void CodeGenTypes::getExpandedTypes(
+    QualType Ty, SmallVectorImpl<llvm::Type *>::iterator &TI) {
   auto Exp = getTypeExpansion(Ty, Context);
   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
@@ -1158,14 +1163,13 @@ void CodeGenFunction::ExpandTypeToArgs(
   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                    : Arg.getKnownRValue().getAggregateAddress();
-    forConstantArrayExpansion(
-        *this, CAExp, Addr, [&](Address EltAddr) {
-          CallArg EltArg = CallArg(
-              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
-              CAExp->EltTy);
-          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
-                           IRCallArgPos);
-        });
+    forConstantArrayExpansion(*this, CAExp, Addr, [&](Address EltAddr) {
+      CallArg EltArg =
+          CallArg(convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
+                  CAExp->EltTy);
+      ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
+                       IRCallArgPos);
+    });
   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
                                    : Arg.getKnownRValue().getAggregateAddress();
@@ -1224,12 +1228,13 @@ static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF,
 /// accessing some number of bytes out of it, try to gep into the struct to get
 /// at its inner goodness.  Dive as deep as possible without entering an 
element
 /// with an in-memory size smaller than DstSize.
-static Address
-EnterStructPointerForCoercedAccess(Address SrcPtr,
-                                   llvm::StructType *SrcSTy,
-                                   uint64_t DstSize, CodeGenFunction &CGF) {
+static Address EnterStructPointerForCoercedAccess(Address SrcPtr,
+                                                  llvm::StructType *SrcSTy,
+                                                  uint64_t DstSize,
+                                                  CodeGenFunction &CGF) {
   // We can't dive into a zero-element struct.
-  if (SrcSTy->getNumElements() == 0) return SrcPtr;
+  if (SrcSTy->getNumElements() == 0)
+    return SrcPtr;
 
   llvm::Type *FirstElt = SrcSTy->getElementType(0);
 
@@ -1237,8 +1242,7 @@ EnterStructPointerForCoercedAccess(Address SrcPtr,
   // first element is the same size as the whole struct, we can enter it. The
   // comparison must be made on the store size and not the alloca size. Using
   // the alloca size may overstate the size of the load.
-  uint64_t FirstEltSize =
-    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
+  uint64_t FirstEltSize = CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
   if (FirstEltSize < DstSize &&
       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
     return SrcPtr;
@@ -1261,8 +1265,7 @@ EnterStructPointerForCoercedAccess(Address SrcPtr,
 /// This behaves as if the value were coerced through memory, so on big-endian
 /// targets the high bits are preserved in a truncation, while little-endian
 /// targets preserve the low bits.
-static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
-                                             llvm::Type *Ty,
+static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty,
                                              CodeGenFunction &CGF) {
   if (Val->getType() == Ty)
     return Val;
@@ -1306,8 +1309,6 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
   return Val;
 }
 
-
-
 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
 /// a pointer to an object of type \arg Ty, known to be aligned to
 /// \arg SrcAlign bytes.
@@ -1459,8 +1460,8 @@ static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                    const ABIArgInfo &info) {
   if (unsigned offset = info.getDirectOffset()) {
     addr = addr.withElementType(CGF.Int8Ty);
-    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
-                                             CharUnits::fromQuantity(offset));
+    addr = CGF.Builder.CreateConstInBoundsByteGEP(
+        addr, CharUnits::fromQuantity(offset));
     addr = addr.withElementType(info.getCoerceToType());
   }
   return addr;
@@ -1630,7 +1631,7 @@ void ClangToLLVMArgMapping::construct(const ASTContext &Context,
 
   TotalIRArgs = IRArgNo;
 }
-}  // namespace
+} // namespace
 
 /***/
 
@@ -1682,8 +1683,7 @@ llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
   return GetFunctionType(FI);
 }
 
-llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
+llvm::FunctionType *CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
 
   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
   (void)Inserted;
@@ -1723,7 +1723,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
   }
 
   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
-  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
+  SmallVector<llvm::Type *, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
 
   // Add type for sret argument.
   if (IRFunctionArgs.hasSRetArg()) {
@@ -1802,7 +1802,8 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
     }
   }
 
-  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
+  bool Erased = FunctionsBeingProcessed.erase(&FI);
+  (void)Erased;
   assert(Erased && "Not in set?");
 
   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
@@ -2013,8 +2014,7 @@ static void getTrivialDefaultFunctionAttributes(
     // TODO: Reciprocal estimate codegen options should apply to instructions?
     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
     if (!Recips.empty())
-      FuncAttrs.addAttribute("reciprocal-estimates",
-                             llvm::join(Recips, ","));
+      FuncAttrs.addAttribute("reciprocal-estimates", llvm::join(Recips, ","));
 
     if (!CodeGenOpts.PreferVectorWidth.empty() &&
         CodeGenOpts.PreferVectorWidth != "none")
@@ -2745,8 +2745,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
 
     llvm::AttrBuilder Attrs(getLLVMContext());
 
-    QualType ThisTy =
-        FI.arg_begin()->type.getTypePtr()->getPointeeType();
+    QualType ThisTy = FI.arg_begin()->type.getTypePtr()->getPointeeType();
 
     if (!CodeGenOpts.NullPointerIsValid &&
         getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
@@ -2773,8 +2772,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
   }
 
   unsigned ArgNo = 0;
-  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
-                                          E = FI.arg_end();
+  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), E = FI.arg_end();
        I != E; ++I, ++ArgNo) {
     QualType ParamType = I->type;
     const ABIArgInfo &AI = I->info;
@@ -2784,9 +2782,9 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
       if (AI.getPaddingInReg()) {
         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
-            llvm::AttributeSet::get(
-                getLLVMContext(),
-                llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
+            llvm::AttributeSet::get(getLLVMContext(),
+                                    llvm::AttrBuilder(getLLVMContext())
+                                        .addAttribute(llvm::Attribute::InReg));
       }
     }
 
@@ -2879,8 +2877,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
       QualType PTy = RefTy->getPointeeType();
       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
-        Attrs.addDereferenceableAttr(
-            getMinimumObjectSize(PTy).getQuantity());
+        Attrs.addDereferenceableAttr(getMinimumObjectSize(PTy).getQuantity());
       if (getTypes().getTargetAddressSpace(PTy) == 0 &&
           !CodeGenOpts.NullPointerIsValid)
         Attrs.addAttribute(llvm::Attribute::NonNull);
@@ -2974,10 +2971,11 @@ static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
 
   // This can happen with promotions that actually don't change the
   // underlying type, like the enum promotions.
-  if (value->getType() == varType) return value;
+  if (value->getType() == varType)
+    return value;
 
-  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
-         && "unexpected promotion type");
+  assert((varType->isIntegerTy() || varType->isFloatingPointTy()) &&
+         "unexpected promotion type");
 
   if (isa<llvm::IntegerType>(varType))
     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
@@ -3014,16 +3012,16 @@ static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
 }
 
 namespace {
-  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
-    Address Temp;
-    Address Arg;
-    CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
-      CGF.Builder.CreateStore(errorValue, Arg);
-    }
-  };
-}
+struct CopyBackSwiftError final : EHScopeStack::Cleanup {
+  Address Temp;
+  Address Arg;
+  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
+    llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
+    CGF.Builder.CreateStore(errorValue, Arg);
+  }
+};
+} // namespace
 
 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                          llvm::Function *Fn,
@@ -3039,8 +3037,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
     if (FD->hasImplicitReturnZero()) {
       QualType RetTy = FD->getReturnType().getUnqualifiedType();
-      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
-      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
+      llvm::Type *LLVMTy = CGM.getTypes().ConvertType(RetTy);
+      llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);
       Builder.CreateStore(Zero, ReturnValue);
     }
   }
@@ -3079,13 +3077,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
          "Mismatch between function signature & arguments.");
   unsigned ArgNo = 0;
   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
-  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
-       i != e; ++i, ++info_it, ++ArgNo) {
+  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); i != e;
+       ++i, ++info_it, ++ArgNo) {
     const VarDecl *Arg = *i;
     const ABIArgInfo &ArgI = info_it->info;
 
     bool isPromoted =
-      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
+        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
     // We are converting from ABIArgInfo type to VarDecl type directly, unless
     // the parameter is promoted. In this case we convert to
     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
@@ -3170,8 +3168,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
             AI->addAttr(llvm::Attribute::NonNull);
 
           QualType OTy = PVD->getOriginalType();
-          if (const auto *ArrTy =
-              getContext().getAsConstantArrayType(OTy)) {
+          if (const auto *ArrTy = getContext().getAsConstantArrayType(OTy)) {
             // A C99 array parameter declaration with the static keyword also
             // indicates dereferenceability, and if the size is constant we can
             // use the dereferenceable attribute (which requires the size in
@@ -3180,7 +3177,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
               QualType ETy = ArrTy->getElementType();
               llvm::Align Alignment =
                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
-              AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
+              AI->addAttrs(llvm::AttrBuilder(getLLVMContext())
+                               .addAlignmentAttr(Alignment));
               uint64_t ArrSize = ArrTy->getZExtSize();
               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                   ArrSize) {
@@ -3196,7 +3194,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
               }
             }
           } else if (const auto *ArrTy =
-                     getContext().getAsVariableArrayType(OTy)) {
+                         getContext().getAsVariableArrayType(OTy)) {
             // For C99 VLAs with the static keyword, we don't know the size so
             // we can't use the dereferenceable attribute, but in addrspace(0)
             // we know that it must be nonnull.
@@ -3204,7 +3202,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
               QualType ETy = ArrTy->getElementType();
               llvm::Align Alignment =
                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
-              AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
+              AI->addAttrs(llvm::AttrBuilder(getLLVMContext())
+                               .addAlignmentAttr(Alignment));
               if (!getTypes().getTargetAddressSpace(ETy) &&
                   !CGM.getCodeGenOpts().NullPointerIsValid)
                 AI->addAttr(llvm::Attribute::NonNull);
@@ -3226,8 +3225,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
             if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
               AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
-              AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
-                  llvm::Align(AlignmentInt)));
+              AI->addAttrs(llvm::AttrBuilder(getLLVMContext())
+                               .addAlignmentAttr(llvm::Align(AlignmentInt)));
             }
           }
         }
@@ -3247,8 +3246,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
         // LLVM expects swifterror parameters to be used in very restricted
         // ways.  Copy the value into a less-restricted temporary.
         llvm::Value *V = AI;
-        if (FI.getExtParameterInfo(ArgNo).getABI()
-              == ParameterABI::SwiftErrorResult) {
+        if (FI.getExtParameterInfo(ArgNo).getABI() ==
+            ParameterABI::SwiftErrorResult) {
           QualType pointeeTy = Ty->getPointeeType();
           assert(pointeeTy->isPointerType());
           RawAddress temp =
@@ -3315,8 +3314,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
 
       llvm::StructType *STy =
           dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
-      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
-                                     Arg->getName());
+      Address Alloca =
+          CreateMemTemp(Ty, getContext().getDeclAlign(Arg), Arg->getName());
 
       // Pointer to store into.
       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
@@ -3478,7 +3477,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
   while (insn->use_empty()) {
     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
-    if (!bitcast) return;
+    if (!bitcast)
+      return;
 
     // This is "safe" because we would have used a ConstantExpr otherwise.
     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
@@ -3491,8 +3491,10 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                     llvm::Value *result) {
   // We must be immediately followed the cast.
   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
-  if (BB->empty()) return nullptr;
-  if (&BB->back() != result) return nullptr;
+  if (BB->empty())
+    return nullptr;
+  if (&BB->back() != result)
+    return nullptr;
 
   llvm::Type *resultType = result->getType();
 
@@ -3518,9 +3520,11 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
   // Look for:
   //   %generator = call i8* @objc_retain(i8* %originalResult)
   // or
-  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
+  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8*
+  //   %originalResult)
   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
-  if (!call) return nullptr;
+  if (!call)
+    return nullptr;
 
   bool doRetainAutorelease;
 
@@ -3557,7 +3561,8 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
   // Keep killing bitcasts, for sanity.  Note that we no longer care
   // about precise ordering as long as there's exactly one use.
   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
-    if (!bitcast->hasOneUse()) break;
+    if (!bitcast->hasOneUse())
+      break;
     InstsToKill.push_back(bitcast);
     result = bitcast->getOperand(0);
   }
@@ -3579,10 +3584,12 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                           llvm::Value *result) {
   // This is only applicable to a method with an immutable 'self'.
   const ObjCMethodDecl *method =
-    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
-  if (!method) return nullptr;
+      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
+  if (!method)
+    return nullptr;
   const VarDecl *self = method->getSelfDecl();
-  if (!self->getType().isConstQualified()) return nullptr;
+  if (!self->getType().isConstQualified())
+    return nullptr;
 
   // Look for a retain call. Note: stripPointerCasts looks through returned arg
   // functions, which would cause us to miss the retain.
@@ -3594,7 +3601,7 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
   // Look for an ordinary load of 'self'.
   llvm::Value *retainedValue = retainCall->getArgOperand(0);
   llvm::LoadInst *load =
-    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
+      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
   if (!load || load->isAtomic() || load->isVolatile() ||
       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getBasePointer())
     return nullptr;
@@ -3657,7 +3664,8 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
   // with noreturn cleanups.
   if (!ReturnValuePtr->hasOneUse()) {
     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
-    if (IP->empty()) return nullptr;
+    if (IP->empty())
+      return nullptr;
 
     // Look at directly preceding instruction, skipping bitcasts, lifetime
     // markers, and fake uses and their operands.
@@ -3685,7 +3693,8 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
   }
 
   llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
-  if (!store) return nullptr;
+  if (!store)
+    return nullptr;
 
   // Now do a first-and-dirty dominance check: just walk up the
   // single-predecessors chain from the current insertion point.
@@ -3780,8 +3789,7 @@ static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
     if (F->isBitField()) {
       const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
       setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
-                  BFI.StorageSize / CharWidth, BFI.Offset,
-                  BFI.Size, CharWidth,
+                  BFI.StorageSize / CharWidth, BFI.Offset, BFI.Size, CharWidth,
                   CGM.getDataLayout().isBigEndian());
       continue;
     }
@@ -3945,7 +3953,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
     switch (getEvaluationKind(RetTy)) {
     case TEK_Complex: {
       ComplexPairTy RT =
-        EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
+          EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                          /*isInit*/ true);
       break;
@@ -3979,8 +3987,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
 
       // If there is a dominating store to ReturnValue, we can elide
       // the load, zap the store, and usually zap the alloca.
-      if (llvm::StoreInst *SI =
-              findDominatingStoreToReturnValue(*this)) {
+      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
         // Reuse the debug location from the store unless there is
         // cleanup code to be emitted between the store and return
         // instruction.
@@ -3990,7 +3997,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
         RV = SI->getValueOperand();
         SI->eraseFromParent();
 
-      // Otherwise, we have to do a simple load.
+        // Otherwise, we have to do a simple load.
       } else {
         RV = Builder.CreateLoad(ReturnValue);
       }
@@ -4020,8 +4027,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
       else
         llvm_unreachable("Unexpected function/method type");
 
-      assert(getLangOpts().ObjCAutoRefCount &&
-             !FI.isReturnsRetained() &&
+      assert(getLangOpts().ObjCAutoRefCount && !FI.isReturnsRetained() &&
              RT->isObjCRetainableType());
 #endif
       RV = emitAutoreleaseOfResult(*this, RV);
@@ -4038,7 +4044,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
     auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
 
     // Load all of the coerced elements out into results.
-    llvm::SmallVector<llvm::Value*, 4> results;
+    llvm::SmallVector<llvm::Value *, 4> results;
     Address addr = ReturnValue.withElementType(coercionType);
     unsigned unpaddedIndex = 0;
     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
@@ -4059,7 +4065,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
     if (results.size() == 1) {
       RV = results[0];
 
-    // Otherwise, we need to make a first-class aggregate.
+      // Otherwise, we need to make a first-class aggregate.
     } else {
       // Construct a return type that lacks padding elements.
       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
@@ -4165,8 +4171,7 @@ static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
 }
 
-static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
-                                          QualType Ty) {
+static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
   // FIXME: Generate IR in one pass, rather than going back and fixing up these
   // placeholders.
   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
@@ -4178,12 +4183,10 @@ static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
   CharUnits Align = CharUnits::fromQuantity(4);
   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
 
-  return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
-                               Ty.getQualifiers(),
-                               AggValueSlot::IsNotDestructed,
-                               AggValueSlot::DoesNotNeedGCBarriers,
-                               AggValueSlot::IsNotAliased,
-                               AggValueSlot::DoesNotOverlap);
+  return AggValueSlot::forAddr(
+      Address(Placeholder, IRTy, Align), Ty.getQualifiers(),
+      AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+      AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
 }
 
 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
@@ -4201,22 +4204,21 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
   if (type->isReferenceType()) {
     args.add(RValue::get(Builder.CreateLoad(local)), type);
 
-  // In ARC, move out of consumed arguments so that the release cleanup
-  // entered by StartFunction doesn't cause an over-release.  This isn't
-  // optimal -O0 code generation, but it should get cleaned up when
-  // optimization is enabled.  This also assumes that delegate calls are
-  // performed exactly once for a set of arguments, but that should be safe.
+    // In ARC, move out of consumed arguments so that the release cleanup
+    // entered by StartFunction doesn't cause an over-release.  This isn't
+    // optimal -O0 code generation, but it should get cleaned up when
+    // optimization is enabled.  This also assumes that delegate calls are
+    // performed exactly once for a set of arguments, but that should be safe.
   } else if (getLangOpts().ObjCAutoRefCount &&
-             param->hasAttr<NSConsumedAttr>() &&
-             type->isObjCRetainableType()) {
+             param->hasAttr<NSConsumedAttr>() && type->isObjCRetainableType()) {
     llvm::Value *ptr = Builder.CreateLoad(local);
     auto null =
-      llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
+        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
     Builder.CreateStore(null, local);
     args.add(RValue::get(ptr), type);
 
-  // For the most part, we just need to load the alloca, except that
-  // aggregate r-values are actually pointers to temporaries.
+    // For the most part, we just need to load the alloca, except that
+    // aggregate r-values are actually pointers to temporaries.
   } else {
     args.add(convertTempToRValue(local, type, loc), type);
   }
@@ -4308,7 +4310,7 @@ static void emitWriteback(CodeGenFunction &CGF,
     // Release the old value.
     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
 
-  // Otherwise, we can just do a normal lvalue store.
+    // Otherwise, we can just do a normal lvalue store.
   } else {
     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
   }
@@ -4321,7 +4323,7 @@ static void emitWriteback(CodeGenFunction &CGF,
 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                             const CallArgList &CallArgs) {
   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
-    CallArgs.getCleanupsToDeactivate();
+      CallArgs.getCleanupsToDeactivate();
   // Iterate in reverse to increase the likelihood of popping the cleanup.
   for (const auto &I : llvm::reverse(Cleanups)) {
     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
@@ -4349,12 +4351,12 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
     srcLV = CGF.EmitLValue(lvExpr);
 
-  // Otherwise, just emit it as a scalar.
+    // Otherwise, just emit it as a scalar.
   } else {
     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
 
     QualType srcAddrType =
-      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+        CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
   }
   Address srcAddr = srcLV.getAddress();
@@ -4451,8 +4453,8 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
 
     // Make a phi for the value to intrinsically use.
     if (valueToUse) {
-      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
-                                                      "icr.to-use");
+      llvm::PHINode *phiToUse =
+          CGF.Builder.CreatePHI(valueToUse->getType(), 2, "icr.to-use");
       phiToUse->addIncoming(valueToUse, copyBB);
       phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
                             originBB);
@@ -4482,8 +4484,7 @@ void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
 
 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                           SourceLocation ArgLoc,
-                                          AbstractCallee AC,
-                                          unsigned ParmNum) {
+                                          AbstractCallee AC, unsigned ParmNum) {
   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
                          SanOpts.has(SanitizerKind::NullabilityArg)))
     return;
@@ -4525,7 +4526,8 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
   SanitizerScope SanScope(this);
   llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
   llvm::Constant *StaticData[] = {
-      EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
+      EmitCheckSourceLocation(ArgLoc),
+      EmitCheckSourceLocation(AttrLoc),
       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
   };
   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, {});
@@ -4662,9 +4664,8 @@ void CodeGenFunction::EmitCallArgs(
     auto SizeTy = Context.getSizeType();
     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
-    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
-                                                     EmittedArg.getScalarVal(),
-                                                     PS->isDynamic());
+    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(
+        Arg, PS->getType(), T, EmittedArg.getScalarVal(), PS->isDynamic());
     Args.add(RValue::get(V), SizeTy);
     // If we're emitting args in reverse, be sure to do so with
     // pass_object_size, as well.
@@ -4725,8 +4726,7 @@ void CodeGenFunction::EmitCallArgs(
 namespace {
 
 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
-  DestroyUnpassedArg(Address Addr, QualType Ty)
-      : Addr(Addr), Ty(Ty) {}
+  DestroyUnpassedArg(Address Addr, QualType Ty) : Addr(Addr), Ty(Ty) {}
 
   Address Addr;
   QualType Ty;
@@ -4794,8 +4794,8 @@ void CodeGenFunction::EmitWritebacks(const CallArgList &args) {
 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                   QualType type) {
   DisableDebugLocationUpdates Dis(*this, E);
-  if (const ObjCIndirectCopyRestoreExpr *CRE
-        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
+  if (const ObjCIndirectCopyRestoreExpr *CRE =
+          dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
     assert(getLangOpts().ObjCAutoRefCount);
     return emitWritebackArg(*this, args, CRE);
   }
@@ -4826,7 +4826,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
     // If we're using inalloca, use the argument memory.  Otherwise, use a
     // temporary.
     AggValueSlot Slot = args.isUsingInAlloca()
-        ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");
+                            ? createPlaceholderSlot(*this, type)
+                            : CreateAggTemp(type, "agg.tmp");
 
     bool DestroyedInCallee = true, NeedsCleanup = true;
     if (const auto *RD = type->getAsCXXRecordDecl())
@@ -4887,8 +4888,7 @@ QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
 
 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
 // optimizer it can aggressively ignore unwind edges.
-void
-CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
+void CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
@@ -4973,12 +4973,8 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
       getBundlesForFunclet(callee.getCallee());
 
   if (getInvokeDest()) {
-    llvm::InvokeInst *invoke =
-      Builder.CreateInvoke(callee,
-                           getUnreachableBlock(),
-                           getInvokeDest(),
-                           args,
-                           BundleList);
+    llvm::InvokeInst *invoke = Builder.CreateInvoke(
+        callee, getUnreachableBlock(), getInvokeDest(), args, BundleList);
     invoke->setDoesNotReturn();
     invoke->setCallingConv(getRuntimeCC());
   } else {
@@ -5384,17 +5380,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
             NeedCopy = true;
           }
           if (!getLangOpts().OpenCL) {
-            if ((isByValOrRef &&
-                (AS != LangAS::Default &&
-                 AS != CGM.getASTAllocaAddressSpace()))) {
+            if ((isByValOrRef && (AS != LangAS::Default &&
+                                  AS != CGM.getASTAllocaAddressSpace()))) {
               NeedCopy = true;
             }
           }
           // For OpenCL even if RV is located in default or alloca address space
           // we don't want to perform address space cast for it.
-          else if ((isByValOrRef &&
-                    Addr.getType()->getAddressSpace() != IRFuncTy->
-                      getParamType(FirstIRArg)->getPointerAddressSpace())) {
+          else if ((isByValOrRef && Addr.getType()->getAddressSpace() !=
+                                        IRFuncTy->getParamType(FirstIRArg)
+                                            ->getPointerAddressSpace())) {
             NeedCopy = true;
           }
         }
@@ -5467,8 +5462,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
 
         // Implement swifterror by copying into a new swifterror argument.
         // We'll write back in the normal path out of the call.
-        if (CallInfo.getExtParameterInfo(ArgNo).getABI()
-              == ParameterABI::SwiftErrorResult) {
+        if (CallInfo.getExtParameterInfo(ArgNo).getABI() ==
+            ParameterABI::SwiftErrorResult) {
           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
 
           QualType pointeeTy = I->Ty->getPointeeType();
@@ -5476,7 +5471,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
               V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
 
           swiftErrorTemp =
-            CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
+              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
           V = swiftErrorTemp.getPointer();
           cast<llvm::AllocaInst>(V)->setSwiftError(true);
 
@@ -5620,11 +5615,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);
 
         // Materialize to a temporary.
-        addr = CreateTempAlloca(
-            RV.getScalarVal()->getType(),
-            CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)),
-            "tmp",
-            /*ArraySize=*/nullptr, &AllocaAddr);
+        addr = CreateTempAlloca(RV.getScalarVal()->getType(),
+                                CharUnits::fromQuantity(std::max(
+                                    layout->getAlignment(), scalarAlign)),
+                                "tmp",
+                                /*ArraySize=*/nullptr, &AllocaAddr);
         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
 
         Builder.CreateStore(RV.getScalarVal(), addr);
@@ -5636,7 +5631,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
       unsigned unpaddedIndex = 0;
       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
         llvm::Type *eltType = coercionType->getElementType(i);
-        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
+        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
+          continue;
         Address eltAddr = Builder.CreateStructGEP(addr, i);
         llvm::Value *elt = CreateCoercedLoad(
             eltAddr,
@@ -5870,7 +5866,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   // Control Flow Guard checks should not be added, even if the call is inlined.
   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
     if (const auto *A = FD->getAttr<CFGuardAttr>()) {
-      if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
+      if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
+          !CI->getCalledFunction())
         Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
     }
   }
@@ -5895,8 +5892,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   // For more details, see the comment before the definition of
   // IPVK_IndirectCallTarget in InstrProfData.inc.
   if (!CI->getCalledFunction())
-    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
-                     CI, CalleePtr);
+    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr);
 
   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
   // optimizer it can aggressively ignore unwind edges.
@@ -5939,8 +5935,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   }
 
   // Add metadata for calls to MSAllocator functions
-  if (getDebugInfo() && TargetDecl &&
-      TargetDecl->hasAttr<MSAllocatorAttr>())
+  if (getDebugInfo() && TargetDecl && TargetDecl->hasAttr<MSAllocatorAttr>())
     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
 
   // Add metadata if calling an __attribute__((error(""))) or warning fn.

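A quick note on the one switch case above that keeps its single-line layout: the
RISC-V entry is fenced with clang-format suppression comments, so the tool
reflows everything around it but leaves that line untouched. A minimal
self-contained sketch of how the directive pair behaves (toLLVMCC is a
hypothetical helper for illustration, not code from this patch):

  #include "clang/Basic/Specifiers.h" // clang::CallingConv
  #include "llvm/IR/CallingConv.h"    // llvm::CallingConv

  // Formatting is suspended between the off/on markers, so the fenced case
  // stays on one line while unfenced cases get the usual two-line layout.
  unsigned toLLVMCC(clang::CallingConv CC) {
    switch (CC) {
    // clang-format off
    case clang::CC_RISCVVectorCall: return llvm::CallingConv::RISCV_VectorCall;
    // clang-format on
    default:
      return llvm::CallingConv::C;
    }
  }

The rest of the diff should be reproducible by running clang-format in place
over the two files, e.g. clang-format -i clang/lib/AST/Type.cpp
clang/lib/CodeGen/CGCall.cpp, assuming the in-tree .clang-format configuration.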