wmi updated this revision to Diff 110641.
wmi added a comment.

Don't separate a bitfield in the middle of a run, because doing so may hinder
the combining of bitfield accesses. Only separate a bitfield at the beginning of a run.

Repository:
  rL LLVM

https://reviews.llvm.org/D36562

Files:
  lib/CodeGen/CGRecordLayoutBuilder.cpp
  test/CodeGen/2009-12-07-BitFieldAlignment.c
  test/CodeGenCXX/2009-12-23-MissingSext.cpp
  test/CodeGenCXX/bitfield.cpp
  test/CodeGenCXX/pod-member-memcpys.cpp
  test/OpenMP/atomic_capture_codegen.cpp
  test/OpenMP/atomic_read_codegen.c
  test/OpenMP/atomic_update_codegen.cpp
  test/OpenMP/atomic_write_codegen.c

Index: test/OpenMP/atomic_write_codegen.c
===================================================================
--- test/OpenMP/atomic_write_codegen.c
+++ test/OpenMP/atomic_write_codegen.c
@@ -28,12 +28,14 @@
 int4 int4x;
 
 struct BitFields {
-  int : 32;
+  int : 2;
+  int : 30;
   int a : 31;
 } bfx;
 
 struct BitFields_packed {
-  int : 32;
+  int : 2;
+  int : 30;
   int a : 31;
 } __attribute__ ((__packed__)) bfx_packed;
 
@@ -58,13 +60,15 @@
 } __attribute__ ((__packed__)) bfx3_packed;
 
 struct BitFields4 {
-  short : 16;
+  short : 6;
+  short : 10;
   int a: 1;
   long b : 7;
 } bfx4;
 
 struct BitFields4_packed {
-  short : 16;
+  short : 6;
+  short : 10;
   int a: 1;
   long b : 7;
 } __attribute__ ((__packed__)) bfx4_packed;
Index: test/OpenMP/atomic_update_codegen.cpp
===================================================================
--- test/OpenMP/atomic_update_codegen.cpp
+++ test/OpenMP/atomic_update_codegen.cpp
@@ -27,12 +27,14 @@
 int4 int4x;
 
 struct BitFields {
-  int : 32;
+  int : 2;
+  int : 30;
   int a : 31;
 } bfx;
 
 struct BitFields_packed {
-  int : 32;
+  int : 2;
+  int : 30;
   int a : 31;
 } __attribute__ ((__packed__)) bfx_packed;
 
@@ -57,13 +59,15 @@
 } __attribute__ ((__packed__)) bfx3_packed;
 
 struct BitFields4 {
-  short : 16;
+  short : 6;
+  short : 10;
   int a: 1;
   long b : 7;
 } bfx4;
 
 struct BitFields4_packed {
-  short : 16;
+  short : 6;
+  short : 10;
   int a: 1;
   long b : 7;
 } __attribute__ ((__packed__)) bfx4_packed;
Index: test/OpenMP/atomic_read_codegen.c
===================================================================
--- test/OpenMP/atomic_read_codegen.c
+++ test/OpenMP/atomic_read_codegen.c
@@ -28,12 +28,14 @@
 int4 int4x;
 
 struct BitFields {
-  int : 32;
+  int : 2;
+  int : 30;
   int a : 31;
 } bfx;
 
 struct BitFields_packed {
-  int : 32;
+  int : 2;
+  int : 30;
   int a : 31;
 } __attribute__ ((__packed__)) bfx_packed;
 
@@ -58,13 +60,15 @@
 } __attribute__ ((__packed__)) bfx3_packed;
 
 struct BitFields4 {
-  short : 16;
+  short : 6;
+  short : 10;
   int a: 1;
   long b : 7;
 } bfx4;
 
 struct BitFields4_packed {
-  short : 16;
+  short : 6;
+  short : 10;
   int a: 1;
   long b : 7;
 } __attribute__ ((__packed__)) bfx4_packed;
Index: test/OpenMP/atomic_capture_codegen.cpp
===================================================================
--- test/OpenMP/atomic_capture_codegen.cpp
+++ test/OpenMP/atomic_capture_codegen.cpp
@@ -27,12 +27,14 @@
 int4 int4x;
 
 struct BitFields {
-  int : 32;
+  int : 2;
+  int : 30;
   int a : 31;
 } bfx;
 
 struct BitFields_packed {
-  int : 32;
+  int : 2;
+  int : 30;
   int a : 31;
 } __attribute__ ((__packed__)) bfx_packed;
 
@@ -57,13 +59,15 @@
 } __attribute__ ((__packed__)) bfx3_packed;
 
 struct BitFields4 {
-  short : 16;
+  short : 6;
+  short : 10;
   int a: 1;
   long b : 7;
 } bfx4;
 
 struct BitFields4_packed {
-  short : 16;
+  short : 6;
+  short : 10;
   int a: 1;
   long b : 7;
 } __attribute__ ((__packed__)) bfx4_packed;
Index: test/CodeGenCXX/pod-member-memcpys.cpp
===================================================================
--- test/CodeGenCXX/pod-member-memcpys.cpp
+++ test/CodeGenCXX/pod-member-memcpys.cpp
@@ -69,7 +69,7 @@
 
 struct BitfieldMember3 {
   virtual void f();
-  int   : 8;
+  int z : 8;
   int x : 1;
   int y;
 };
Index: test/CodeGenCXX/bitfield.cpp
===================================================================
--- test/CodeGenCXX/bitfield.cpp
+++ test/CodeGenCXX/bitfield.cpp
@@ -478,3 +478,42 @@
     s->b = x;
   }
 }
+
+namespace N8 {
+  // If a bitfield is at the beginning of a group of bitfields, has the width of a legal integer type
+  // and is naturally aligned, the bitfield will be accessed like a separate memory location.
+  struct S {
+    unsigned b1 : 16;
+    unsigned b2 : 6;
+    unsigned b3 : 2;
+    unsigned b4 : 3;
+  };
+  unsigned read00(S* s) {
+    // CHECK-X86-64-LABEL: define i32 @_ZN2N86read00EPNS_1SE
+    // CHECK-X86-64:   %[[cast:.*]] = bitcast %{{.*}} to i16*
+    // CHECK-X86-64:   %[[val:.*]]  = load i16, i16* %[[cast]]
+    // CHECK-X86-64:   %[[ext:.*]]  = zext i16 %[[val]] to i32
+    // CHECK-X86-64:                  ret i32 %[[ext]]
+    // CHECK-PPC64-LABEL: define zeroext i32 @_ZN2N86read00EPNS_1SE
+    // CHECK-PPC64:   %[[val:.*]]   = load i32, i32* {{.*}}
+    // CHECK-PPC64:   %[[shr:.*]]   = lshr i32 %[[val]], 16
+    // CHECK-PPC64:                   ret i32 %[[shr]]
+    return s->b1;
+  }
+  void write00(S* s, unsigned x) {
+    // CHECK-X86-64-LABEL: define void @_ZN2N87write00EPNS_1SEj
+    // CHECK-X86-64:   %[[load:.*]] = load i32, i32* %{{.*}}
+    // CHECK-X86-64:   %[[cast:.*]] = bitcast %{{.*}} to i16*
+    // CHECK-X86-64:   %[[new:.*]] = trunc i32 %[[load]] to i16
+    // CHECK-X86-64:                 store i16 %[[new]], i16* %[[cast]]
+    // CHECK-PPC64-LABEL: define void @_ZN2N87write00EPNS_1SEj
+    // CHECK-PPC64:   %[[load1:.*]] = load i32, i32* {{.*}}
+    // CHECK-PPC64:   %[[load2:.*]] = load i32, i32* {{.*}}
+    // CHECK-PPC64:   %[[and1:.*]]  = and i32 %[[load1]], 65535
+    // CHECK-PPC64:   %[[shl:.*]]   = shl i32 %[[and1]], 16
+    // CHECK-PPC64:   %[[and2:.*]]  = and i32 %[[load2]], 65535
+    // CHECK-PPC64:   %[[or:.*]]    = or i32 %[[and2]], %[[shl]]
+    // CHECK-PPC64:                  store i32 %[[or]], i32* {{.*}}
+    s->b1 = x;
+  }
+}
Index: test/CodeGenCXX/2009-12-23-MissingSext.cpp
===================================================================
--- test/CodeGenCXX/2009-12-23-MissingSext.cpp
+++ test/CodeGenCXX/2009-12-23-MissingSext.cpp
@@ -3,7 +3,8 @@
 // getting extended to 32 bits, so uninitialized
 // bits of the temporary were used.  7366161.
 struct foo {
-  char x:8;
+  char x1:3;
+  char x2:5;
   signed int y:24;
 };
 int bar(struct foo p, int x) {
Index: test/CodeGen/2009-12-07-BitFieldAlignment.c
===================================================================
--- test/CodeGen/2009-12-07-BitFieldAlignment.c
+++ test/CodeGen/2009-12-07-BitFieldAlignment.c
@@ -4,7 +4,7 @@
 struct S {
   int a, b;
   void *c;
-  unsigned d : 8;
+  unsigned d : 7;
   unsigned e : 8;
 };
 
Index: lib/CodeGen/CGRecordLayoutBuilder.cpp
===================================================================
--- lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -403,6 +403,25 @@
     }
     return;
   }
+
+  // Check whether the current Field is better treated as a single-field run.
+  // When the current field has a legal integer width and its bitfield offset
+  // is naturally aligned, it is better to make the bitfield a separate
+  // storage component so that it can be accessed directly.
+  // Note: we don't separate a bitfield in the middle of a run of fields
+  // because doing so may hinder the combining of bitfield accesses. We only
+  // separate a bitfield at the beginning of a run.
+  auto betterBeSingleFieldRun = [&](RecordDecl::field_iterator Field) {
+    if (!DataLayout.isLegalInteger(Field->getBitWidthValue(Context)))
+      return false;
+    // Make sure Field is naturally aligned if it is treated as an IType integer.
+    llvm::Type *IType = getIntNType(Field->getBitWidthValue(Context));
+    if (getFieldBitOffset(*Field) % Context.toBits(getAlignment(IType)) != 0)
+      return false;
+    return true;
+  };
+
+  bool SingleFieldRun = false;
   for (;;) {
     // Check to see if we need to start a new run.
     if (Run == FieldEnd) {
@@ -414,12 +433,15 @@
         Run = Field;
         StartBitOffset = getFieldBitOffset(*Field);
         Tail = StartBitOffset + Field->getBitWidthValue(Context);
+        SingleFieldRun = betterBeSingleFieldRun(Run);
       }
       ++Field;
       continue;
     }
+
     // Add bitfields to the run as long as they qualify.
-    if (Field != FieldEnd && Field->getBitWidthValue(Context) != 0 &&
+    if (!SingleFieldRun && Field != FieldEnd &&
+        Field->getBitWidthValue(Context) != 0 &&
         Tail == getFieldBitOffset(*Field)) {
       Tail += Field->getBitWidthValue(Context);
       ++Field;
@@ -435,6 +457,7 @@
       Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                    MemberInfo::Field, nullptr, *Run));
     Run = FieldEnd;
+    SingleFieldRun = false;
   }
 }
 
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to