dnsampaio updated this revision to Diff 243197.
dnsampaio added a comment.
Added an opt-out flag (`-fno-AAPCSBitfieldWidth`), so the container-width access behavior is now the default and the previous narrowed accesses remain reachable; see the updated RUN lines in aapcs-bitfield.c below.
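For illustration, a minimal sketch of what the flag toggles, assuming a container-typed field like the one exercised by `read_st9` (the struct definition is not visible in this hunk, so the exact declaration here is an assumption):

```c
struct st9 {
  int f : 8; /* assumed: 8-bit field in an int-sized container */
};

int read_st9(volatile struct st9 *m) {
  /* Default (AAPCS container width): a single volatile i32 load of the
     whole container followed by shift/sign-extend, per the LEWIDTH/BEWIDTH
     checks in this test. */
  /* With -fno-AAPCSBitfieldWidth: the access is narrowed to a volatile i8
     load, matching the pre-existing LE/BE checks. */
  return m->f;
}
```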
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D72932/new/
https://reviews.llvm.org/D72932
Files:
clang/include/clang/Basic/CodeGenOptions.def
clang/include/clang/Driver/Options.td
clang/lib/CodeGen/CGExpr.cpp
clang/lib/CodeGen/CGRecordLayout.h
clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
clang/lib/Frontend/CompilerInvocation.cpp
clang/test/CodeGen/aapcs-bitfield.c
clang/test/CodeGen/bitfield-2.c
Index: clang/test/CodeGen/bitfield-2.c
===================================================================
--- clang/test/CodeGen/bitfield-2.c
+++ clang/test/CodeGen/bitfield-2.c
@@ -1,3 +1,4 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -emit-llvm -triple x86_64 -O3 -o %t.opt.ll %s \
// RUN: -fdump-record-layouts > %t.dump.txt
// RUN: FileCheck -check-prefix=CHECK-RECORD < %t.dump.txt %s
@@ -14,7 +15,7 @@
// CHECK-RECORD: LLVMType:%struct.s0 = type { [3 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0
struct __attribute((packed)) s0 {
int f0 : 24;
};
@@ -54,8 +55,8 @@
// CHECK-RECORD: LLVMType:%struct.s1 = type { [3 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0>
-// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
+// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
#pragma pack(push)
#pragma pack(1)
@@ -93,7 +94,6 @@
/****/
// Check that we don't access beyond the bounds of a union.
-//
// PR5567
// CHECK-RECORD: *** Dumping IRgen Record Layout
@@ -102,7 +102,7 @@
// CHECK-RECORD: LLVMType:%union.u2 = type { i8 }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0
union __attribute__((packed)) u2 {
unsigned long long f0 : 3;
@@ -267,15 +267,14 @@
/***/
// Check that we compute the best alignment possible for each access.
-//
// CHECK-RECORD: *** Dumping IRgen Record Layout
// CHECK-RECORD: Record: RecordDecl{{.*}}s7
// CHECK-RECORD: Layout: <CGRecordLayout
// CHECK-RECORD: LLVMType:%struct.s7 = type { i32, i32, i32, i8, i32, [12 x i8] }
// CHECK-RECORD: IsZeroInitializable:1
// CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12>
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16
struct __attribute__((aligned(16))) s7 {
int a, b, c;
@@ -325,7 +324,6 @@
/***/
// This is another case where we narrow the access width immediately.
-//
// <rdar://problem/7893760>
struct __attribute__((packed)) s9 {
Index: clang/test/CodeGen/aapcs-bitfield.c
===================================================================
--- clang/test/CodeGen/aapcs-bitfield.c
+++ clang/test/CodeGen/aapcs-bitfield.c
@@ -1,8 +1,12 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 | FileCheck %s -check-prefix=LE
-// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 | FileCheck %s -check-prefix=BE
-// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 -fAAPCSBitfieldLoad | FileCheck %s -check-prefixes=LE,LENUMLOADS
-// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 -fAAPCSBitfieldLoad | FileCheck %s -check-prefixes=BE,BENUMLOADS
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 -fno-AAPCSBitfieldWidth | FileCheck %s -check-prefix=LE
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 -fno-AAPCSBitfieldWidth | FileCheck %s -check-prefix=BE
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 -fAAPCSBitfieldLoad -fno-AAPCSBitfieldWidth | FileCheck %s -check-prefixes=LE,LENUMLOADS
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 -fAAPCSBitfieldLoad -fno-AAPCSBitfieldWidth | FileCheck %s -check-prefixes=BE,BENUMLOADS
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 | FileCheck %s -check-prefix=LEWIDTH
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 | FileCheck %s -check-prefix=BEWIDTH
+// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 -fAAPCSBitfieldLoad | FileCheck %s -check-prefixes=LEWIDTH,LEWIDTHNUM
+// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 -fAAPCSBitfieldLoad | FileCheck %s -check-prefixes=BEWIDTH,BEWIDTHNUM
struct st0 {
short c : 7;
@@ -24,6 +28,22 @@
// BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
// BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
// BE-NEXT: ret i32 [[CONV]]
+// LEWIDTH-LABEL: @st0_check_load(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: ret i32 [[CONV]]
+//
+// BEWIDTH-LABEL: @st0_check_load(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: ret i32 [[CONV]]
//
int st0_check_load(struct st0 *m) {
return m->c;
@@ -46,6 +66,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
// BE-NEXT: store i8 [[BF_SET]], i8* [[TMP0]], align 2
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st0_check_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
+// LEWIDTH-NEXT: store i8 [[BF_SET]], i8* [[TMP0]], align 2
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st0_check_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST0:%.*]], %struct.st0* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[TMP0]], align 2
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
+// BEWIDTH-NEXT: store i8 [[BF_SET]], i8* [[TMP0]], align 2
+// BEWIDTH-NEXT: ret void
//
void st0_check_store(struct st0 *m) {
m->c = 1;
@@ -72,6 +109,22 @@
// BE-NEXT: [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 10
// BE-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
// BE-NEXT: ret i32 [[CONV]]
+// LEWIDTH-LABEL: @st1_check_load(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 10
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: ret i32 [[CONV]]
+//
+// BEWIDTH-LABEL: @st1_check_load(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 10
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 10
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: ret i32 [[CONV]]
//
int st1_check_load(struct st1 *m) {
return m->c;
@@ -94,6 +147,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
// BE-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st1_check_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 1023
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1024
+// LEWIDTH-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st1_check_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST1:%.*]], %struct.st1* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -64
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
+// BEWIDTH-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void st1_check_store(struct st1 *m) {
m->c = 1;
@@ -120,6 +190,22 @@
// BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
// BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
// BE-NEXT: ret i32 [[CONV]]
+// LEWIDTH-LABEL: @st2_check_load(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 1
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: ret i32 [[CONV]]
+//
+// BEWIDTH-LABEL: @st2_check_load(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: ret i32 [[CONV]]
//
int st2_check_load(struct st2 *m) {
return m->c;
@@ -142,6 +228,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
// BE-NEXT: store i8 [[BF_SET]], i8* [[C]], align 2
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st2_check_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -128
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
+// LEWIDTH-NEXT: store i8 [[BF_SET]], i8* [[C]], align 2
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st2_check_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST2:%.*]], %struct.st2* [[M:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[C]], align 2
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
+// BEWIDTH-NEXT: store i8 [[BF_SET]], i8* [[C]], align 2
+// BEWIDTH-NEXT: ret void
//
void st2_check_store(struct st2 *m) {
m->c = 1;
@@ -167,6 +270,22 @@
// BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 1
// BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
// BE-NEXT: ret i32 [[CONV]]
+// LEWIDTH-LABEL: @st3_check_load(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 9
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 9
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: ret i32 [[CONV]]
+//
+// BEWIDTH-LABEL: @st3_check_load(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 9
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i16 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: ret i32 [[CONV]]
//
int st3_check_load(struct st3 *m) {
return m->c;
@@ -189,6 +308,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
// BE-NEXT: store volatile i8 [[BF_SET]], i8* [[TMP0]], align 2
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st3_check_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -128
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
+// LEWIDTH-NEXT: store volatile i16 [[BF_SET]], i16* [[TMP0]], align 2
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st3_check_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st3* [[M:%.*]] to i16*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 2
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 511
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 512
+// BEWIDTH-NEXT: store volatile i16 [[BF_SET]], i16* [[TMP0]], align 2
+// BEWIDTH-NEXT: ret void
//
void st3_check_store(struct st3 *m) {
m->c = 1;
@@ -220,6 +356,25 @@
// BE-NEXT: [[SEXT:%.*]] = shl i32 [[BF_CAST]], 24
// BE-NEXT: [[CONV:%.*]] = ashr exact i32 [[SEXT]], 24
// BE-NEXT: ret i32 [[CONV]]
+// LEWIDTH-LABEL: @st4_check_load(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
+// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 2
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: ret i32 [[CONV]]
+//
+// BEWIDTH-LABEL: @st4_check_load(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
+// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: ret i32 [[CONV]]
//
int st4_check_load(struct st4 *m) {
return m->c;
@@ -242,6 +397,25 @@
// BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 4
// BE-NEXT: store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st4_check_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
+// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -63
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 2
+// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], i8* [[TMP1]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st4_check_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st4* [[M:%.*]] to i8*
+// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP1]], align 1
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -125
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 4
+// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], i8* [[TMP1]], align 1
+// BEWIDTH-NEXT: ret void
//
void st4_check_store(struct st4 *m) {
m->c = 1;
@@ -264,6 +438,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
// BE-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st4_check_nonv_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -512
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
+// LEWIDTH-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st4_check_nonv_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST4:%.*]], %struct.st4* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 127
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
+// BEWIDTH-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void st4_check_nonv_store(struct st4 *m) {
m->b = 1;
@@ -290,6 +481,22 @@
// BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
// BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
// BE-NEXT: ret i32 [[CONV]]
+// LEWIDTH-LABEL: @st5_check_load(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: ret i32 [[CONV]]
+//
+// BEWIDTH-LABEL: @st5_check_load(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: ret i32 [[CONV]]
//
int st5_check_load(struct st5 *m) {
return m->c;
@@ -312,6 +519,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
// BE-NEXT: store volatile i8 [[BF_SET]], i8* [[C]], align 2
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st5_check_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
+// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], i8* [[C]], align 2
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st5_check_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], %struct.st5* [[M:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[C]], align 2
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
+// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], i8* [[C]], align 2
+// BEWIDTH-NEXT: ret void
//
void st5_check_store(struct st5 *m) {
m->c = 1;
@@ -358,6 +582,41 @@
// BE-NEXT: [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
// BE-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
// BE-NEXT: ret i32 [[ADD4]]
+// LEWIDTH-LABEL: @st6_check_load(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 4
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
+// LEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
+// LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3
+// LEWIDTH-NEXT: [[BF_ASHR3:%.*]] = ashr exact i8 [[BF_SHL2]], 3
+// LEWIDTH-NEXT: [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32
+// LEWIDTH-NEXT: [[ADD5:%.*]] = add nsw i32 [[ADD]], [[BF_CAST4]]
+// LEWIDTH-NEXT: ret i32 [[ADD5]]
+//
+// BEWIDTH-LABEL: @st6_check_load(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
+// BEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]]
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1
+// BEWIDTH-NEXT: [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3
+// BEWIDTH-NEXT: [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32
+// BEWIDTH-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]]
+// BEWIDTH-NEXT: ret i32 [[ADD4]]
//
int st6_check_load(volatile struct st6 *m) {
int x = m->a;
@@ -397,6 +656,37 @@
// BE-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
// BE-NEXT: store i8 [[BF_SET3]], i8* [[C]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st6_check_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], -4096
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1
+// LEWIDTH-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
+// LEWIDTH-NEXT: store i8 2, i8* [[B]], align 2
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
+// LEWIDTH-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32
+// LEWIDTH-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 3
+// LEWIDTH-NEXT: store i8 [[BF_SET3]], i8* [[C]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st6_check_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i16 [[BF_LOAD]], 15
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16
+// BEWIDTH-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1
+// BEWIDTH-NEXT: store i8 2, i8* [[B]], align 2
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1
+// BEWIDTH-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7
+// BEWIDTH-NEXT: [[BF_SET3:%.*]] = or i8 [[BF_CLEAR2]], 24
+// BEWIDTH-NEXT: store i8 [[BF_SET3]], i8* [[C]], align 1
+// BEWIDTH-NEXT: ret void
//
void st6_check_store(struct st6 *m) {
m->a = 1;
@@ -447,6 +737,38 @@
// BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
// BE-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
// BE-NEXT: ret i32 [[ADD3]]
+// LEWIDTH-LABEL: @st7_check_load(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4
+// LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
+// LEWIDTH-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
+// LEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4
+// LEWIDTH-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
+// LEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
+// LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LEWIDTH-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
+// LEWIDTH-NEXT: ret i32 [[ADD3]]
+//
+// BEWIDTH-LABEL: @st7_check_load(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4
+// BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32
+// BEWIDTH-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
+// BEWIDTH-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4
+// BEWIDTH-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32
+// BEWIDTH-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
+// BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BEWIDTH-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]]
+// BEWIDTH-NEXT: ret i32 [[ADD3]]
//
int st7_check_load(struct st7b *m) {
int r = m->x;
@@ -480,6 +802,31 @@
// BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
// BE-NEXT: store volatile i8 [[BF_SET]], i8* [[B]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @st7_check_store(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: store i8 1, i8* [[X]], align 4
+// LEWIDTH-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
+// LEWIDTH-NEXT: store volatile i8 2, i8* [[A]], align 4
+// LEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3
+// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], i8* [[B]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @st7_check_store(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: store i8 1, i8* [[X]], align 4
+// BEWIDTH-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0
+// BEWIDTH-NEXT: store volatile i8 2, i8* [[A]], align 4
+// BEWIDTH-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24
+// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], i8* [[B]], align 1
+// BEWIDTH-NEXT: ret void
//
void st7_check_store(struct st7b *m) {
m->x = 1;
@@ -503,6 +850,17 @@
// BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
// BE-NEXT: store i16 -1, i16* [[TMP0]], align 4
// BE-NEXT: ret i32 65535
+// LEWIDTH-LABEL: @st8_check_assignment(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: store i16 -1, i16* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret i32 65535
+//
+// BEWIDTH-LABEL: @st8_check_assignment(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST8:%.*]], %struct.st8* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: store i16 -1, i16* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret i32 65535
//
int st8_check_assignment(struct st8 *m) {
return m->f = 0xffff;
@@ -525,6 +883,20 @@
// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
// BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_LOAD]] to i32
// BE-NEXT: ret i32 [[BF_CAST]]
+// LEWIDTH-LABEL: @read_st9(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 24
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr exact i32 [[BF_SHL]], 24
+// LEWIDTH-NEXT: ret i32 [[BF_ASHR]]
+//
+// BEWIDTH-LABEL: @read_st9(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_LOAD]], 24
+// BEWIDTH-NEXT: ret i32 [[BF_ASHR]]
//
int read_st9(volatile struct st9 *m) {
return m->f;
@@ -543,6 +915,23 @@
// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4
// BE-NEXT: store volatile i8 1, i8* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @store_st9(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -256
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 1
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @store_st9(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], 16777215
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 16777216
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void store_st9(volatile struct st9 *m) {
m->f = 1;
@@ -565,6 +954,29 @@
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4
// BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_st9(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_st9(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st9* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[TMP1:%.*]] = add i32 [[BF_LOAD]], 16777216
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = and i32 [[TMP1]], -16777216
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_st9(volatile struct st9 *m) {
++m->f;
@@ -592,6 +1004,21 @@
// BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_SHL]], 8
// BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32
// BE-NEXT: ret i32 [[BF_CAST]]
+// LEWIDTH-LABEL: @read_st10(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 23
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
+// LEWIDTH-NEXT: ret i32 [[BF_ASHR]]
+//
+// BEWIDTH-LABEL: @read_st10(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 1
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 24
+// BEWIDTH-NEXT: ret i32 [[BF_ASHR]]
//
int read_st10(volatile struct st10 *m) {
return m->f;
@@ -614,6 +1041,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 128
// BE-NEXT: store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @store_st10(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -511
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @store_st10(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2139095041
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 8388608
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void store_st10(volatile struct st10 *m) {
m->f = 1;
@@ -642,6 +1086,29 @@
// BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], [[BF_SHL2]]
// BE-NEXT: store volatile i16 [[BF_SET]], i16* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_st10(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[INC3:%.*]] = add i32 [[BF_LOAD]], 2
+// LEWIDTH-NEXT: [[BF_SHL2:%.*]] = and i32 [[INC3]], 510
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -511
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_st10(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st10* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[INC3:%.*]] = add i32 [[BF_LOAD]], 8388608
+// BEWIDTH-NEXT: [[BF_SHL2:%.*]] = and i32 [[INC3]], 2139095040
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -2139095041
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_st10(volatile struct st10 *m) {
++m->f;
@@ -665,6 +1132,19 @@
// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
// BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
// BE-NEXT: ret i32 [[BF_CAST]]
+// LEWIDTH-LABEL: @read_st11(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
+// LEWIDTH-NEXT: ret i32 [[BF_CAST]]
+//
+// BEWIDTH-LABEL: @read_st11(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_LOAD]] to i32
+// BEWIDTH-NEXT: ret i32 [[BF_CAST]]
//
int read_st11(volatile struct st11 *m) {
return m->f;
@@ -683,6 +1163,19 @@
// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
// BE-NEXT: store volatile i16 1, i16* [[F]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @store_st11(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
+// LEWIDTH-NEXT: store volatile i16 1, i16* [[F]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @store_st11(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
+// BEWIDTH-NEXT: store volatile i16 1, i16* [[F]], align 1
+// BEWIDTH-NEXT: ret void
//
void store_st11(volatile struct st11 *m) {
m->f = 1;
@@ -705,6 +1198,23 @@
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1
// BE-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_st11(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
+// LEWIDTH-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1
+// LEWIDTH-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_st11(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1
+// BEWIDTH-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1
+// BEWIDTH-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1
+// BEWIDTH-NEXT: ret void
//
void increment_st11(volatile struct st11 *m) {
++m->f;
@@ -725,6 +1235,21 @@
// BE-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
// BE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_e_st11(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
+// LEWIDTH-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_e_st11(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4
+// BEWIDTH-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1
+// BEWIDTH-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_e_st11(volatile struct st11 *m) {
++m->e;
@@ -750,6 +1275,21 @@
// BE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
// BE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
// BE-NEXT: ret i32 [[BF_ASHR]]
+// LEWIDTH-LABEL: @read_st12(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
+// LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
+// LEWIDTH-NEXT: ret i32 [[BF_ASHR]]
+//
+// BEWIDTH-LABEL: @read_st12(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 8
+// BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 16
+// BEWIDTH-NEXT: ret i32 [[BF_ASHR]]
//
int read_st12(volatile struct st12 *m) {
return m->f;
@@ -772,6 +1312,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
// BE-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @store_st12(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @store_st12(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -16776961
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 256
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void store_st12(volatile struct st12 *m) {
m->f = 1;
@@ -800,6 +1357,29 @@
// BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
// BE-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_st12(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
+// LEWIDTH-NEXT: [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_st12(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[INC3:%.*]] = add i32 [[BF_LOAD]], 256
+// BEWIDTH-NEXT: [[BF_SHL2:%.*]] = and i32 [[INC3]], 16776960
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -16776961
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL2]]
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_st12(volatile struct st12 *m) {
++m->f;
@@ -828,6 +1408,29 @@
// BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
// BE-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_e_st12(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 255
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -256
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_e_st12(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st12* [[M:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[TMP1:%.*]] = add i32 [[BF_LOAD]], 16777216
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = and i32 [[TMP1]], -16777216
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 16777215
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_e_st12(volatile struct st12 *m) {
++m->e;
@@ -865,6 +1468,33 @@
// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
// BE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_b_st13(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
+// LEWIDTH-NEXT: [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
+// LEWIDTH-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i40
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
+// LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
+// LEWIDTH-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_b_st13(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
+// BEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
+// BEWIDTH-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
+// BEWIDTH-NEXT: ret void
//
void increment_b_st13(volatile struct st13 *s) {
s->b++;
@@ -891,6 +1521,23 @@
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
// BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_a_st14(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_a_st14(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: ret void
//
void increment_a_st14(volatile struct st14 *s) {
s->a++;
@@ -917,6 +1564,23 @@
// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
// BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_a_st15(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_a_st15(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: ret void
//
void increment_a_st15(volatile struct st15 *s) {
s->a++;
@@ -954,6 +1618,31 @@
// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
// BE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_a_st16(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
+// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
+// LEWIDTH-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_a_st16(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
+// BEWIDTH-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i64
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
+// BEWIDTH-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_a_st16(struct st16 *s) {
s->a++;
@@ -986,6 +1675,33 @@
// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
// BE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_b_st16(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
+// LEWIDTH-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
+// LEWIDTH-NEXT: [[INC:%.*]] = add i32 [[TMP2]], 1
+// LEWIDTH-NEXT: [[TMP3:%.*]] = and i32 [[INC]], 65535
+// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
+// LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
+// LEWIDTH-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_b_st16(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
+// BEWIDTH-NEXT: [[INC4:%.*]] = add i32 [[TMP1]], 65536
+// BEWIDTH-NEXT: [[TMP2:%.*]] = and i32 [[INC4]], -65536
+// BEWIDTH-NEXT: [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
+// BEWIDTH-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_b_st16(struct st16 *s) {
s->b++;
@@ -1018,6 +1734,33 @@
// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
// BE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_c_st16(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
+// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]]
+// LEWIDTH-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_c_st16(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast i48* [[C]] to i64*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
+// BEWIDTH-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i64
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
+// BEWIDTH-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_c_st16(struct st16 *s) {
s->c++;
@@ -1052,6 +1795,35 @@
// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
// BE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_d_st16(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32
+// LEWIDTH-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
+// LEWIDTH-NEXT: [[INC:%.*]] = add i32 [[TMP2]], 1
+// LEWIDTH-NEXT: [[TMP3:%.*]] = and i32 [[INC]], 65535
+// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64
+// LEWIDTH-NEXT: [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]]
+// LEWIDTH-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_d_st16(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast i48* [[D]] to i64*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32
+// BEWIDTH-NEXT: [[INC4:%.*]] = add i32 [[TMP1]], 65536
+// BEWIDTH-NEXT: [[TMP2:%.*]] = and i32 [[INC4]], -65536
+// BEWIDTH-NEXT: [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
+// BEWIDTH-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_d_st16(struct st16 *s) {
s->d++;
@@ -1084,6 +1856,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
// BE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_v_a_st16(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: store volatile i32 [[INC]], i32* [[TMP0]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_v_a_st16(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: store volatile i32 [[INC]], i32* [[TMP0]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_v_a_st16(volatile struct st16 *s) {
s->a++;
@@ -1118,6 +1907,31 @@
// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
// BE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_v_b_st16(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
+// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 1
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_v_b_st16(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
+// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 1
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: [[TMP2:%.*]] = add i32 [[BF_LOAD]], 65536
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = and i32 [[TMP2]], -65536
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_v_b_st16(volatile struct st16 *s) {
s->b++;
@@ -1152,6 +1966,25 @@
// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]]
// BE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_v_c_st16(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
+// LEWIDTH-NEXT: [[TMP1:%.*]] = bitcast i48* [[TMP0]] to i32*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: store volatile i32 [[INC]], i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_v_c_st16(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1
+// BEWIDTH-NEXT: [[TMP1:%.*]] = bitcast i48* [[TMP0]] to i32*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: store volatile i32 [[INC]], i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_v_c_st16(volatile struct st16 *s) {
s->c++;
@@ -1188,6 +2021,31 @@
// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]]
// BE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_v_d_st16(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
+// LEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 3
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: [[INC:%.*]] = add i32 [[BF_LOAD]], 1
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: [[BF_VALUE:%.*]] = and i32 [[INC]], 65535
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], -65536
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
+// LEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_v_d_st16(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i32*
+// BEWIDTH-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 3
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i32, i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: [[TMP2:%.*]] = add i32 [[BF_LOAD]], 65536
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = and i32 [[TMP2]], -65536
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD1]], 65535
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], [[BF_SHL]]
+// BEWIDTH-NEXT: store volatile i32 [[BF_SET]], i32* [[TMP1]], align 4
+// BEWIDTH-NEXT: ret void
//
void increment_v_d_st16(volatile struct st16 *s) {
s->d++;
@@ -1226,6 +2084,33 @@
// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
// BE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_v_b_st17(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
+// LEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32
+// LEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
+// LEWIDTH-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40
+// LEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
+// LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296
+// LEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
+// LEWIDTH-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_v_b_st17(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40*
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1
+// BEWIDTH-NEXT: [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8
+// BEWIDTH-NEXT: [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32
+// BEWIDTH-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1
+// BEWIDTH-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i40
+// BEWIDTH-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1
+// BEWIDTH-NEXT: [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8
+// BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255
+// BEWIDTH-NEXT: [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]]
+// BEWIDTH-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
+// BEWIDTH-NEXT: ret void
//
void increment_v_b_st17(volatile struct st17 *s) {
s->b++;
@@ -1258,6 +2143,23 @@
// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]]
// BE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1
// BE-NEXT: ret void
+// LEWIDTH-LABEL: @increment_v_c_st17(
+// LEWIDTH-NEXT: entry:
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], %struct.st17* [[S:%.*]], i32 0, i32 0, i32 4
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// LEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1
+// LEWIDTH-NEXT: ret void
+//
+// BEWIDTH-LABEL: @increment_v_c_st17(
+// BEWIDTH-NEXT: entry:
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ST17:%.*]], %struct.st17* [[S:%.*]], i32 0, i32 0, i32 4
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1
+// BEWIDTHNUM-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1
+// BEWIDTH-NEXT: ret void
//
void increment_v_c_st17(volatile struct st17 *s) {
s->c++;
Index: clang/lib/Frontend/CompilerInvocation.cpp
===================================================================
--- clang/lib/Frontend/CompilerInvocation.cpp
+++ clang/lib/Frontend/CompilerInvocation.cpp
@@ -1438,6 +1438,7 @@
std::string(Args.getLastArgValue(OPT_fsymbol_partition_EQ));
Opts.ForceAAPCSBitfieldLoad = Args.hasArg(OPT_ForceAAPCSBitfieldLoad);
+ Opts.ForceNoAAPCSBitfieldWidth = Args.hasArg(OPT_ForceNoAAPCSBitfieldWidth);
return Success;
}
Index: clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
===================================================================
--- clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -109,6 +109,14 @@
D->isMsStruct(Context);
}
+ /// Helper function to check if we are targeting AAPCS
+ bool isAAPCS() const {
+ return Context.getTargetInfo().getABI().startswith("aapcs");
+ }
+
+ /// Helper function to check if the target machine is BigEndian
+ bool isBE() const { return Context.getTargetInfo().isBigEndian(); }
+
/// The Itanium base layout rule allows virtual bases to overlap
/// other bases, which complicates layout in specific ways.
///
@@ -172,7 +180,8 @@
void lowerUnion();
void accumulateFields();
void accumulateBitFields(RecordDecl::field_iterator Field,
- RecordDecl::field_iterator FieldEnd);
+ RecordDecl::field_iterator FieldEnd);
+ void computeVolatileBitfields();
void accumulateBases();
void accumulateVPtrs();
void accumulateVBases();
@@ -237,6 +246,9 @@
// least-significant-bit.
if (DataLayout.isBigEndian())
Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
+ Info.VolatileStorageSize = 0;
+ Info.VolatileOffset = 0;
+ Info.VolatileStorageOffset = CharUnits::Zero();
}
void CGRecordLowering::lower(bool NVBaseType) {
@@ -261,15 +273,19 @@
// 8) Format the complete list of members in a way that can be consumed by
// CodeGenTypes::ComputeRecordLayout.
CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
- if (D->isUnion())
- return lowerUnion();
+ if (D->isUnion()) {
+ lowerUnion();
+ return computeVolatileBitfields();
+ }
accumulateFields();
// RD implies C++.
if (RD) {
accumulateVPtrs();
accumulateBases();
- if (Members.empty())
- return appendPaddingBytes(Size);
+ if (Members.empty()) {
+ appendPaddingBytes(Size);
+ return computeVolatileBitfields();
+ }
if (!NVBaseType)
accumulateVBases();
}
@@ -281,6 +297,7 @@
Members.pop_back();
calculateZeroInit();
fillOutputFields();
+ computeVolatileBitfields();
}
void CGRecordLowering::lowerUnion() {
@@ -500,11 +517,122 @@
}
}
+/// Volatile bit-fields might have additional access regulations, as
+/// defined by the AAPCS, which states that, when possible, bit-fields
+/// should be accessed using containers of the declared type width:
+/// Volatile bit-fields – preserving number and width of container accesses
+/// When a volatile bit-field is read, and its container does not overlap
+/// with any non-bit-field member, its container must be read exactly once
+/// using the access width appropriate to the type of the container.
+/// When a volatile bit-field is written, and its container does not overlap
+/// with any non-bit-field member, its container must be read exactly once
+/// and written exactly once using the access width appropriate to the type
+/// of the container. The two accesses are not atomic.
+/// Enforcing the width restriction can be disabled using
+/// -fno-AAPCSBitfieldWidth.
+/// Enforcing the number of loads can be enabled using -fAAPCSBitfieldLoad.
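+/// As a minimal illustration (a hypothetical example, not taken from the
+/// tests): given
+///   struct S { volatile int a : 3; volatile int b : 5; };
+/// both a and b are accessed through a single 32-bit container load and
+/// store, matching the width of their declared type int, rather than the
+/// narrower 8-bit storage unit clang would otherwise use.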
+void CGRecordLowering::computeVolatileBitfields() {
+ if (!isAAPCS() || Types.getCodeGenOpts().ForceNoAAPCSBitfieldWidth)
+ return;
+
+ for (auto &I : BitFields) {
+ const FieldDecl *Field = I.first;
+ CGBitFieldInfo &Info = I.second;
+ llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
+    // If the record alignment is less than the type width, we can't enforce
+    // an aligned load; bail out.
+ if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
+ ResLTy->getPrimitiveSizeInBits())
+ continue;
+ // CGRecordLowering::setBitFieldInfo() pre-adjusts the bitfield offsets
+ // for big-endian targets, but it assumes a container of width
+ // Info.StorageSize. Since AAPCS uses a different container size (width
+ // of the type), we first undo that calculation here and redo it once
+ // the bitfield offset within the new container is calculated
+ const unsigned OldOffset =
+ isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
+ // Offset to the bitfield from the beginning of the struct
+ const unsigned AbsoluteOffset =
+ Context.toBits(Info.StorageOffset) + OldOffset;
+
+ // Container size is the width of the bitfield type
+ const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
+ // Nothing to do if the access uses the desired
+ // container width and is naturally aligned
+ if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
+ continue;
+
+ // Offset within the container
+ unsigned Offset = AbsoluteOffset & (StorageSize - 1);
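+    // For example (hypothetical values): with AbsoluteOffset == 40 and a
+    // 32-bit container, Offset == 40 & 31 == 8, i.e. the bit-field starts
+    // 8 bits into its naturally aligned container.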
+    // Bail out if an aligned load of the container cannot cover the entire
+    // bitfield. This can happen, for example, if the bitfield is part of a
+    // packed struct. AAPCS does not define access rules for such cases; we
+    // let clang follow its own rules.
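+    // E.g. (a hypothetical case) in
+    //   struct __attribute__((packed)) P { char c; volatile int b : 32; };
+    // the bit-field starts at bit 8 of its aligned 32-bit container, so
+    // Offset (8) + Size (32) exceeds StorageSize (32) and we keep the
+    // layout clang already computed.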
+ if (Offset + Info.Size > StorageSize)
+ continue;
+
+ // Re-adjust offsets for big-endian targets
+ if (isBE())
+ Offset = StorageSize - (Offset + Info.Size);
+
+ const CharUnits StorageOffset =
+ Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
+ const CharUnits End = StorageOffset +
+ Context.toCharUnitsFromBits(StorageSize) -
+ CharUnits::One();
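+    // Continuing the example above (hypothetical values): StorageOffset ==
+    // bits 40 & ~31 == bit 32 == byte 4, and End == byte 4 + 4 - 1 == byte
+    // 7, the last byte the 32-bit container access may touch.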
+
+ const ASTRecordLayout &Layout =
+ Context.getASTRecordLayout(Field->getParent());
+    // If the access would reach outside the memory of the record, bail out.
+ const CharUnits RecordSize = Layout.getSize();
+ if (End >= RecordSize)
+ continue;
+
+    // Bail out if performing this load would access non-bit-field members.
+ bool Conflict = false;
+ for (const auto &it : Fields) {
+ const FieldDecl *F = it.first;
+      // Allow bit-field overlaps.
+ if (F->isBitField())
+ continue;
+ const CharUnits FOffset = Context.toCharUnitsFromBits(
+ Layout.getFieldOffset(F->getFieldIndex()));
+ const CharUnits FEnd =
+ FOffset +
+ Context.toCharUnitsFromBits(
+ Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
+ CharUnits::One();
+ // The other field starts after the desired load end.
+ if (End < FOffset)
+ break;
+
+ // The other field ends before the desired load offset.
+ if (FEnd < StorageOffset)
+ continue;
+
+      // The desired load overlaps a non-bit-field member; bail out.
+ Conflict = true;
+ break;
+ }
+
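+    // E.g. in the hypothetical struct T { volatile int b : 8; char c; };
+    // a 32-bit container access for b would also cover c, so Conflict is
+    // set and the narrower access computed earlier is kept.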
+ if (Conflict)
+ continue;
+    // Write the new bit-field access parameters.
+    // The storage offset is now expressed as the number of container-width
+    // elements from the start of the structure, so divide the byte offset
+    // by the container size.
+ Info.VolatileStorageOffset =
+ StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
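+    // E.g. (hypothetical values): a byte offset of 4 with a 32-bit container
+    // becomes element index 1; the GEP emitted in CGExpr.cpp operates on a
+    // pointer whose element type is the container type.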
+ Info.VolatileStorageSize = StorageSize;
+ Info.VolatileOffset = Offset;
+ }
+}
+
void CGRecordLowering::accumulateVPtrs() {
if (Layout.hasOwnVFPtr())
- Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
- llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
- getPointerTo()->getPointerTo()));
+ Members.push_back(
+ MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
+ llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)
+ ->getPointerTo()
+ ->getPointerTo()));
if (Layout.hasOwnVBPtr())
Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
@@ -726,7 +854,8 @@
Offset = StorageSize - (Offset + Size);
}
- return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
+ return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset,
+ Offset, StorageSize, StorageOffset);
}
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
@@ -845,8 +974,10 @@
assert(Info.StorageSize <= SL->getSizeInBits() &&
"Union not large enough for bitfield storage");
} else {
- assert(Info.StorageSize ==
- getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
+ assert((Info.StorageSize ==
+ getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
+ Info.VolatileStorageSize ==
+ getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
"Storage size does not match the element type size");
}
assert(Info.Size > 0 && "Empty bitfield!");
@@ -894,13 +1025,12 @@
void CGBitFieldInfo::print(raw_ostream &OS) const {
OS << "<CGBitFieldInfo"
- << " Offset:" << Offset
- << " Size:" << Size
- << " IsSigned:" << IsSigned
+ << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
<< " StorageSize:" << StorageSize
- << " StorageOffset:" << StorageOffset.getQuantity() << ">";
+ << " StorageOffset:" << StorageOffset.getQuantity()
+ << " VolatileOffset:" << VolatileOffset
+ << " VolatileStorageSize:" << VolatileStorageSize
+ << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}
-LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
- print(llvm::errs());
-}
+LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const { print(llvm::errs()); }
Index: clang/lib/CodeGen/CGRecordLayout.h
===================================================================
--- clang/lib/CodeGen/CGRecordLayout.h
+++ clang/lib/CodeGen/CGRecordLayout.h
@@ -80,13 +80,30 @@
/// The offset of the bitfield storage from the start of the struct.
CharUnits StorageOffset;
+  /// The offset of the bit-field within its storage container when the
+  /// AAPCS volatile access rules apply. This offset is in bits.
+  unsigned VolatileOffset : 16;
+
+  /// The storage size in bits which should be used when accessing this
+  /// bitfield with a volatile, AAPCS-conforming access.
+  unsigned VolatileStorageSize;
+
+  /// The offset of the bitfield storage from the start of the struct, as
+  /// used by the volatile access path.
+  CharUnits VolatileStorageOffset;
+
CGBitFieldInfo()
- : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset() {}
+ : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset(),
+ VolatileOffset(), VolatileStorageSize(), VolatileStorageOffset() {}
CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
- unsigned StorageSize, CharUnits StorageOffset)
+ unsigned StorageSize, CharUnits StorageOffset,
+ unsigned VolatileOffset, unsigned VolatileStorageSize,
+ CharUnits VolatileStorageOffset)
: Offset(Offset), Size(Size), IsSigned(IsSigned),
- StorageSize(StorageSize), StorageOffset(StorageOffset) {}
+ StorageSize(StorageSize), StorageOffset(StorageOffset),
+ VolatileOffset(VolatileOffset),
+ VolatileStorageSize(VolatileStorageSize),
+ VolatileStorageOffset(VolatileStorageOffset) {}
void print(raw_ostream &OS) const;
void dump() const;
Index: clang/lib/CodeGen/CGExpr.cpp
===================================================================
--- clang/lib/CodeGen/CGExpr.cpp
+++ clang/lib/CodeGen/CGExpr.cpp
@@ -1835,22 +1835,27 @@
llvm::Type *ResLTy = ConvertType(LV.getType());
Address Ptr = LV.getBitFieldAddress();
- llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
-
+ llvm::Value *Val =
+ Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
+
+ bool UseVolatile = LV.isVolatileQualified() &&
+ Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+ const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
+ const unsigned StorageSize =
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
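+  // A VolatileStorageSize of 0 means computeVolatileBitfields() bailed out,
+  // so the regular access parameters are used.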
if (Info.IsSigned) {
- assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
- unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
+ assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
+ unsigned HighBits = StorageSize - Offset - Info.Size;
if (HighBits)
Val = Builder.CreateShl(Val, HighBits, "bf.shl");
- if (Info.Offset + HighBits)
- Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
+ if (Offset + HighBits)
+ Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
} else {
- if (Info.Offset)
- Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
- if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
- Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
- Info.Size),
- "bf.clear");
+ if (Offset)
+ Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
+ if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
+ Val = Builder.CreateAnd(
+ Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
}
Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
EmitScalarRangeCheck(Val, LV.getType(), Loc);
@@ -2045,39 +2050,43 @@
/*isSigned=*/false);
llvm::Value *MaskedVal = SrcVal;
+ const bool UseVolatile = !CGM.getCodeGenOpts().ForceNoAAPCSBitfieldWidth &&
+ Dst.isVolatileQualified() &&
+ Info.VolatileStorageSize != 0 &&
+ isAAPCS(CGM.getTarget());
+ const unsigned StorageSize =
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+ const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
// See if there are other bits in the bitfield's storage we'll need to load
// and mask together with source before storing.
- if (Info.StorageSize != Info.Size) {
- assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
+ if (StorageSize != Info.Size) {
+ assert(StorageSize > Info.Size && "Invalid bitfield size.");
llvm::Value *Val =
- Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
+ Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
// Mask the source value as needed.
if (!hasBooleanRepresentation(Dst.getType()))
- SrcVal = Builder.CreateAnd(SrcVal,
- llvm::APInt::getLowBitsSet(Info.StorageSize,
- Info.Size),
- "bf.value");
+ SrcVal = Builder.CreateAnd(
+ SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
+ "bf.value");
MaskedVal = SrcVal;
- if (Info.Offset)
- SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
+ if (Offset)
+ SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
// Mask out the original value.
- Val = Builder.CreateAnd(Val,
- ~llvm::APInt::getBitsSet(Info.StorageSize,
- Info.Offset,
- Info.Offset + Info.Size),
- "bf.clear");
+ Val = Builder.CreateAnd(
+ Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
+ "bf.clear");
// Or together the unchanged values and the source value.
SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
} else {
- assert(Info.Offset == 0);
+ assert(Offset == 0);
// According to the AACPS:
// When a volatile bit-field is written, and its container does not overlap
- // with any non-bit-field member, its container must be read exactly once and
- // written exactly once using the access width appropriate to the type of the
- // container. The two accesses are not atomic.
+ // with any non-bit-field member, its container must be read exactly once
+ // and written exactly once using the access width appropriate to the type
+ // of the container. The two accesses are not atomic.
if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
Builder.CreateLoad(Ptr, true, "bf.load");
@@ -2092,8 +2101,8 @@
// Sign extend the value if needed.
if (Info.IsSigned) {
- assert(Info.Size <= Info.StorageSize);
- unsigned HighBits = Info.StorageSize - Info.Size;
+ assert(Info.Size <= StorageSize);
+ unsigned HighBits = StorageSize - Info.Size;
if (HighBits) {
ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
@@ -4075,32 +4084,45 @@
if (field->isBitField()) {
const CGRecordLayout &RL =
- CGM.getTypes().getCGRecordLayout(field->getParent());
+ CGM.getTypes().getCGRecordLayout(field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
+ const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
+ !CGM.getCodeGenOpts().ForceNoAAPCSBitfieldWidth &&
+ Info.VolatileStorageSize != 0 &&
+ field->getType()
+ .withCVRQualifiers(base.getVRQualifiers())
+ .isVolatileQualified();
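+    // The volatility may come either from the field's own type or from the
+    // qualifiers of the base lvalue, e.g. an access through a pointer of
+    // type volatile struct st16 * as in the tests above.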
Address Addr = base.getAddress(*this);
unsigned Idx = RL.getLLVMFieldNo(field);
const RecordDecl *rec = field->getParent();
- if (!IsInPreservedAIRegion &&
- (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
- if (Idx != 0)
- // For structs, we GEP to the field that the record layout suggests.
- Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
- } else {
- llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
- getContext().getRecordType(rec), rec->getLocation());
- Addr = Builder.CreatePreserveStructAccessIndex(Addr, Idx,
- getDebugInfoFIndex(rec, field->getFieldIndex()),
- DbgInfo);
+ if (!UseVolatile) {
+ if (!IsInPreservedAIRegion &&
+ (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
+ if (Idx != 0)
+ // For structs, we GEP to the field that the record layout suggests.
+ Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
+ } else {
+ llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
+ getContext().getRecordType(rec), rec->getLocation());
+ Addr = Builder.CreatePreserveStructAccessIndex(
+ Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
+ DbgInfo);
+ }
}
-
+ const unsigned SS =
+ UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
// Get the access type.
- llvm::Type *FieldIntTy =
- llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
+ llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
if (Addr.getElementType() != FieldIntTy)
Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
+ if (UseVolatile) {
+ const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
+ if (VolatileOffset)
+ Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
+ }
QualType fieldType =
- field->getType().withCVRQualifiers(base.getVRQualifiers());
+ field->getType().withCVRQualifiers(base.getVRQualifiers());
// TODO: Support TBAA for bit fields.
LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
Index: clang/include/clang/Driver/Options.td
===================================================================
--- clang/include/clang/Driver/Options.td
+++ clang/include/clang/Driver/Options.td
@@ -2325,6 +2325,9 @@
def ForceAAPCSBitfieldLoad : Flag<["-"], "fAAPCSBitfieldLoad">, Group<m_arm_Features_Group>,
Flags<[DriverOption,CC1Option]>,
HelpText<"Follows the AAPCS standard that all volatile bit-field write generates at least one load. (ARM only).">;
+def ForceNoAAPCSBitfieldWidth : Flag<["-"], "fno-AAPCSBitfieldWidth">, Group<m_arm_Features_Group>,
+ Flags<[DriverOption,CC1Option]>,
+  HelpText<"Do not follow the AAPCS standard requirement that volatile bit-field width is dictated by the field's declared type. (ARM only).">;
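+// Usage sketch (illustrative, not part of this patch): as a CC1Option the
+// flag can be exercised directly in a cc1 invocation, e.g.
+//   clang -cc1 -triple armv8-none-linux-eabi -fno-AAPCSBitfieldWidth -emit-llvm file.c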
def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_aarch64_Features_Group>,
HelpText<"Generate code which only uses the general purpose registers (AArch64 only)">;
Index: clang/include/clang/Basic/CodeGenOptions.def
===================================================================
--- clang/include/clang/Basic/CodeGenOptions.def
+++ clang/include/clang/Basic/CodeGenOptions.def
@@ -388,9 +388,13 @@
/// Whether to emit unused static constants.
CODEGENOPT(KeepStaticConsts, 1, 0)
-/// Whether to not follow the AAPCS that enforce at least one read before storing to a volatile bitfield
+/// Whether to follow the AAPCS rule that enforces at least one read before
+/// storing to a volatile bit-field.
CODEGENOPT(ForceAAPCSBitfieldLoad, 1, 0)
+/// Whether not to follow the AAPCS rule that enforces the access width of a
+/// volatile bit-field to match the width of the field's declared type.
+CODEGENOPT(ForceNoAAPCSBitfieldWidth, 1, 0)
+
#undef CODEGENOPT
#undef ENUM_CODEGENOPT
#undef VALUE_CODEGENOPT