This revision was automatically updated to reflect the committed changes.
Closed by commit rG372cb38f4510: [Codegen] Emit both AssumeAlignedAttr and AllocAlignAttr assumptions if they… (authored by lebedev.ri).
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D72979/new/
https://reviews.llvm.org/D72979
Files:
clang/lib/CodeGen/CGCall.cpp
clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
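For readers skimming the thread: when a declaration carried both attributes, CodeGen previously emitted only the AssumeAlignedAttr assumption, because the two checks in CGCall.cpp were chained with `else if`. Below is a minimal sketch of the now-covered case, mirroring the committed test; the name `my_alloc` and the caller are hypothetical, chosen only for illustration:

/* Hypothetical declaration for illustration; mirrors the committed test.
   assume_aligned(32): the returned pointer is at least 32-byte aligned.
   alloc_align(2): the returned pointer is also aligned to the value of
   the second argument (the attribute's parameter index is 1-based). */
void *my_alloc(int size, int alignment)
    __attribute__((assume_aligned(32), alloc_align(2)));

void *use(void) {
  void *p = my_alloc(320, 64);
  /* After this patch, CodeGen emits an llvm.assume for both facts:
     - from assume_aligned(32): ((uintptr_t)p & 31) == 0
     - from alloc_align(2):     ((uintptr_t)p & 63) == 0, since the
       second argument is 64 at this call site. */
  return p;
}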
Index: clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
@@ -0,0 +1,77 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
+
+void *my_aligned_alloc(int size, int alignment) __attribute__((assume_aligned(32), alloc_align(2)));
+
+// CHECK-LABEL: @t0_immediate0(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = call i8* @my_aligned_alloc(i32 320, i32 16)
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 15
+// CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
+// CHECK-NEXT: ret i8* [[CALL]]
+//
+void *t0_immediate0() {
+ return my_aligned_alloc(320, 16);
+};
+
+// CHECK-LABEL: @t1_immediate1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = call i8* @my_aligned_alloc(i32 320, i32 32)
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 31
+// CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
+// CHECK-NEXT: ret i8* [[CALL]]
+//
+void *t1_immediate1() {
+ return my_aligned_alloc(320, 32);
+};
+
+// CHECK-LABEL: @t2_immediate2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = call i8* @my_aligned_alloc(i32 320, i32 64)
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 63
+// CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
+// CHECK-NEXT: ret i8* [[CALL]]
+//
+void *t2_immediate2() {
+ return my_aligned_alloc(320, 64);
+};
+
+// CHECK-LABEL: @t3_variable(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[ALIGNMENT_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32 [[ALIGNMENT:%.*]], i32* [[ALIGNMENT_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGNMENT_ADDR]], align 4
+// CHECK-NEXT: [[CALL:%.*]] = call i8* @my_aligned_alloc(i32 320, i32 [[TMP0]])
+// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i8* [[CALL]] to i64
+// CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], [[MASK]]
+// CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
+// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
+// CHECK-NEXT: ret i8* [[CALL]]
+//
+void *t3_variable(int alignment) {
+ return my_aligned_alloc(320, alignment);
+};
Index: clang/lib/CodeGen/CGCall.cpp
===================================================================
--- clang/lib/CodeGen/CGCall.cpp
+++ clang/lib/CodeGen/CGCall.cpp
@@ -4628,7 +4628,8 @@
llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
AlignmentCI, OffsetValue);
- } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
+ }
+ if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
.getRValue(*this)
.getScalarVal();
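The fix itself is just un-chaining the two attribute checks. Stripped of CodeGen details, the pattern is as follows; this is a simplified sketch with placeholder helper names, not the actual CGCall.cpp code, where the real work is done by EmitAlignmentAssumption:

// Before: chained checks, so the AllocAlignAttr assumption was silently
// dropped whenever AssumeAlignedAttr was also present on the callee.
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
  emitAssumeAlignedAssumption(AA);   // placeholder helper
} else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
  emitAllocAlignAssumption(AA);      // dead when both attributes are set
}

// After: independent checks, so both assumptions are emitted when both
// attributes apply to the callee.
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>())
  emitAssumeAlignedAssumption(AA);
if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>())
  emitAllocAlignAssumption(AA);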
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits