MattDevereau updated this revision to Diff 409236.
MattDevereau added a comment.

Removed the -O1 and -ffast-math flags so the test checks the default unoptimized IR, including the NaN-check and __mulhc3/__divhc3 libcall paths that -ffast-math would otherwise elide.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D119926/new/

https://reviews.llvm.org/D119926

Files:
  clang/lib/AST/ASTContext.cpp
  clang/test/CodeGen/aarch64-complex-half-math.c

Index: clang/test/CodeGen/aarch64-complex-half-math.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-complex-half-math.c
@@ -0,0 +1,419 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 %s -emit-llvm -triple aarch64-unknown-unknown -o - | FileCheck %s --check-prefix=AARCH64
+// REQUIRES: aarch64-registered-target
+
+// AARCH64-LABEL: @add_float_rr(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[B_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    store half [[A:%.*]], half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    store half [[B:%.*]], half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = load half, half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[ADD:%.*]] = fadd half [[TMP0]], [[TMP1]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[ADD]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half 0xH0000, half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex add_float_rr(_Float16 a, _Float16 b) {
+  return a + b;
+}
+// AARCH64-LABEL: @add_float_cr(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[A]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[A_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    store half [[B:%.*]], half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 0
+// AARCH64-NEXT:    [[A_REAL:%.*]] = load half, half* [[A_REALP]], align 2
+// AARCH64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 1
+// AARCH64-NEXT:    [[A_IMAG:%.*]] = load half, half* [[A_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[ADD_R:%.*]] = fadd half [[A_REAL]], [[TMP1]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[ADD_R]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[A_IMAG]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex add_float_cr(_Float16 _Complex a, _Float16 b) {
+  return a + b;
+}
+// AARCH64-LABEL: @add_float_rc(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[B]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[B_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    store half [[A:%.*]], half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 0
+// AARCH64-NEXT:    [[B_REAL:%.*]] = load half, half* [[B_REALP]], align 2
+// AARCH64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 1
+// AARCH64-NEXT:    [[B_IMAG:%.*]] = load half, half* [[B_IMAGP]], align 2
+// AARCH64-NEXT:    [[ADD_R:%.*]] = fadd half [[TMP1]], [[B_REAL]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[ADD_R]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[B_IMAG]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex add_float_rc(_Float16 a, _Float16 _Complex b) {
+  return a + b;
+}
+// AARCH64-LABEL: @add_float_cc(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[A]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[A_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = bitcast { half, half }* [[B]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[B_COERCE:%.*]], [2 x half]* [[TMP1]], align 2
+// AARCH64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 0
+// AARCH64-NEXT:    [[A_REAL:%.*]] = load half, half* [[A_REALP]], align 2
+// AARCH64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 1
+// AARCH64-NEXT:    [[A_IMAG:%.*]] = load half, half* [[A_IMAGP]], align 2
+// AARCH64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 0
+// AARCH64-NEXT:    [[B_REAL:%.*]] = load half, half* [[B_REALP]], align 2
+// AARCH64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 1
+// AARCH64-NEXT:    [[B_IMAG:%.*]] = load half, half* [[B_IMAGP]], align 2
+// AARCH64-NEXT:    [[ADD_R:%.*]] = fadd half [[A_REAL]], [[B_REAL]]
+// AARCH64-NEXT:    [[ADD_I:%.*]] = fadd half [[A_IMAG]], [[B_IMAG]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[ADD_R]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[ADD_I]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex add_float_cc(_Float16 _Complex a, _Float16 _Complex b) {
+  return a + b;
+}
+
+// AARCH64-LABEL: @sub_float_rr(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[B_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    store half [[A:%.*]], half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    store half [[B:%.*]], half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = load half, half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[SUB:%.*]] = fsub half [[TMP0]], [[TMP1]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[SUB]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half 0xH0000, half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex sub_float_rr(_Float16 a, _Float16 b) {
+  return a - b;
+}
+// AARCH64-LABEL: @sub_float_cr(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[A]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[A_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    store half [[B:%.*]], half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 0
+// AARCH64-NEXT:    [[A_REAL:%.*]] = load half, half* [[A_REALP]], align 2
+// AARCH64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 1
+// AARCH64-NEXT:    [[A_IMAG:%.*]] = load half, half* [[A_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[SUB_R:%.*]] = fsub half [[A_REAL]], [[TMP1]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[SUB_R]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[A_IMAG]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex sub_float_cr(_Float16 _Complex a, _Float16 b) {
+  return a - b;
+}
+// AARCH64-LABEL: @sub_float_rc(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[B]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[B_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    store half [[A:%.*]], half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 0
+// AARCH64-NEXT:    [[B_REAL:%.*]] = load half, half* [[B_REALP]], align 2
+// AARCH64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 1
+// AARCH64-NEXT:    [[B_IMAG:%.*]] = load half, half* [[B_IMAGP]], align 2
+// AARCH64-NEXT:    [[SUB_R:%.*]] = fsub half [[TMP1]], [[B_REAL]]
+// AARCH64-NEXT:    [[SUB_I:%.*]] = fneg half [[B_IMAG]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[SUB_R]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[SUB_I]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex sub_float_rc(_Float16 a, _Float16 _Complex b) {
+  return a - b;
+}
+// AARCH64-LABEL: @sub_float_cc(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[A]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[A_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = bitcast { half, half }* [[B]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[B_COERCE:%.*]], [2 x half]* [[TMP1]], align 2
+// AARCH64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 0
+// AARCH64-NEXT:    [[A_REAL:%.*]] = load half, half* [[A_REALP]], align 2
+// AARCH64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 1
+// AARCH64-NEXT:    [[A_IMAG:%.*]] = load half, half* [[A_IMAGP]], align 2
+// AARCH64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 0
+// AARCH64-NEXT:    [[B_REAL:%.*]] = load half, half* [[B_REALP]], align 2
+// AARCH64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 1
+// AARCH64-NEXT:    [[B_IMAG:%.*]] = load half, half* [[B_IMAGP]], align 2
+// AARCH64-NEXT:    [[SUB_R:%.*]] = fsub half [[A_REAL]], [[B_REAL]]
+// AARCH64-NEXT:    [[SUB_I:%.*]] = fsub half [[A_IMAG]], [[B_IMAG]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[SUB_R]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[SUB_I]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex sub_float_cc(_Float16 _Complex a, _Float16 _Complex b) {
+  return a - b;
+}
+
+// AARCH64-LABEL: @mul_float_rr(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[B_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    store half [[A:%.*]], half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    store half [[B:%.*]], half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = load half, half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[MUL:%.*]] = fmul half [[TMP0]], [[TMP1]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[MUL]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half 0xH0000, half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex mul_float_rr(_Float16 a, _Float16 b) {
+  return a * b;
+}
+// AARCH64-LABEL: @mul_float_cr(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[A]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[A_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    store half [[B:%.*]], half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 0
+// AARCH64-NEXT:    [[A_REAL:%.*]] = load half, half* [[A_REALP]], align 2
+// AARCH64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 1
+// AARCH64-NEXT:    [[A_IMAG:%.*]] = load half, half* [[A_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[MUL_RL:%.*]] = fmul half [[A_REAL]], [[TMP1]]
+// AARCH64-NEXT:    [[MUL_IL:%.*]] = fmul half [[A_IMAG]], [[TMP1]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[MUL_RL]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[MUL_IL]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex mul_float_cr(_Float16 _Complex a, _Float16 b) {
+  return a * b;
+}
+// AARCH64-LABEL: @mul_float_rc(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[B]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[B_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    store half [[A:%.*]], half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 0
+// AARCH64-NEXT:    [[B_REAL:%.*]] = load half, half* [[B_REALP]], align 2
+// AARCH64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 1
+// AARCH64-NEXT:    [[B_IMAG:%.*]] = load half, half* [[B_IMAGP]], align 2
+// AARCH64-NEXT:    [[MUL_RL:%.*]] = fmul half [[TMP1]], [[B_REAL]]
+// AARCH64-NEXT:    [[MUL_IR:%.*]] = fmul half [[TMP1]], [[B_IMAG]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[MUL_RL]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[MUL_IR]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex mul_float_rc(_Float16 a, _Float16 _Complex b) {
+  return a * b;
+}
+// AARCH64-LABEL: @mul_float_cc(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[A]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[A_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = bitcast { half, half }* [[B]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[B_COERCE:%.*]], [2 x half]* [[TMP1]], align 2
+// AARCH64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 0
+// AARCH64-NEXT:    [[A_REAL:%.*]] = load half, half* [[A_REALP]], align 2
+// AARCH64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 1
+// AARCH64-NEXT:    [[A_IMAG:%.*]] = load half, half* [[A_IMAGP]], align 2
+// AARCH64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 0
+// AARCH64-NEXT:    [[B_REAL:%.*]] = load half, half* [[B_REALP]], align 2
+// AARCH64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 1
+// AARCH64-NEXT:    [[B_IMAG:%.*]] = load half, half* [[B_IMAGP]], align 2
+// AARCH64-NEXT:    [[MUL_AC:%.*]] = fmul half [[A_REAL]], [[B_REAL]]
+// AARCH64-NEXT:    [[MUL_BD:%.*]] = fmul half [[A_IMAG]], [[B_IMAG]]
+// AARCH64-NEXT:    [[MUL_AD:%.*]] = fmul half [[A_REAL]], [[B_IMAG]]
+// AARCH64-NEXT:    [[MUL_BC:%.*]] = fmul half [[A_IMAG]], [[B_REAL]]
+// AARCH64-NEXT:    [[MUL_R:%.*]] = fsub half [[MUL_AC]], [[MUL_BD]]
+// AARCH64-NEXT:    [[MUL_I:%.*]] = fadd half [[MUL_AD]], [[MUL_BC]]
+// AARCH64-NEXT:    [[ISNAN_CMP:%.*]] = fcmp uno half [[MUL_R]], [[MUL_R]]
+// AARCH64-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF6:![0-9]+]]
+// AARCH64:       complex_mul_imag_nan:
+// AARCH64-NEXT:    [[ISNAN_CMP1:%.*]] = fcmp uno half [[MUL_I]], [[MUL_I]]
+// AARCH64-NEXT:    br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF6]]
+// AARCH64:       complex_mul_libcall:
+// AARCH64-NEXT:    [[CALL:%.*]] = call { half, half } @__mulhc3(half noundef [[A_REAL]], half noundef [[A_IMAG]], half noundef [[B_REAL]], half noundef [[B_IMAG]]) #[[ATTR1:[0-9]+]]
+// AARCH64-NEXT:    [[TMP2:%.*]] = extractvalue { half, half } [[CALL]], 0
+// AARCH64-NEXT:    [[TMP3:%.*]] = extractvalue { half, half } [[CALL]], 1
+// AARCH64-NEXT:    br label [[COMPLEX_MUL_CONT]]
+// AARCH64:       complex_mul_cont:
+// AARCH64-NEXT:    [[REAL_MUL_PHI:%.*]] = phi half [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP2]], [[COMPLEX_MUL_LIBCALL]] ]
+// AARCH64-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi half [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP3]], [[COMPLEX_MUL_LIBCALL]] ]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[REAL_MUL_PHI]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[IMAG_MUL_PHI]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP4:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP4]]
+//
+_Float16 _Complex mul_float_cc(_Float16 _Complex a, _Float16 _Complex b) {
+  return a * b;
+}
+
+// AARCH64-LABEL: @div_float_rr(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[B_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    store half [[A:%.*]], half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    store half [[B:%.*]], half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = load half, half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[DIV:%.*]] = fdiv half [[TMP0]], [[TMP1]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[DIV]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half 0xH0000, half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP2]]
+//
+_Float16 _Complex div_float_rr(_Float16 a, _Float16 b) {
+  return a / b;
+}
+// AARCH64-LABEL: @div_float_cr(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[A]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[A_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    store half [[B:%.*]], half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 0
+// AARCH64-NEXT:    [[A_REAL:%.*]] = load half, half* [[A_REALP]], align 2
+// AARCH64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 1
+// AARCH64-NEXT:    [[A_IMAG:%.*]] = load half, half* [[A_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[B_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP2:%.*]] = fdiv half [[A_REAL]], [[TMP1]]
+// AARCH64-NEXT:    [[TMP3:%.*]] = fdiv half [[A_IMAG]], [[TMP1]]
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[TMP2]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[TMP3]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP4:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP4]]
+//
+_Float16 _Complex div_float_cr(_Float16 _Complex a, _Float16 b) {
+  return a / b;
+}
+// AARCH64-LABEL: @div_float_rc(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A_ADDR:%.*]] = alloca half, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[B]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[B_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    store half [[A:%.*]], half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = load half, half* [[A_ADDR]], align 2
+// AARCH64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 0
+// AARCH64-NEXT:    [[B_REAL:%.*]] = load half, half* [[B_REALP]], align 2
+// AARCH64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 1
+// AARCH64-NEXT:    [[B_IMAG:%.*]] = load half, half* [[B_IMAGP]], align 2
+// AARCH64-NEXT:    [[CALL:%.*]] = call { half, half } @__divhc3(half noundef [[TMP1]], half noundef 0xH0000, half noundef [[B_REAL]], half noundef [[B_IMAG]]) #[[ATTR1]]
+// AARCH64-NEXT:    [[TMP2:%.*]] = extractvalue { half, half } [[CALL]], 0
+// AARCH64-NEXT:    [[TMP3:%.*]] = extractvalue { half, half } [[CALL]], 1
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[TMP2]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[TMP3]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP4:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP4]]
+//
+_Float16 _Complex div_float_rc(_Float16 a, _Float16 _Complex b) {
+  return a / b;
+}
+// AARCH64-LABEL: @div_float_cc(
+// AARCH64-NEXT:  entry:
+// AARCH64-NEXT:    [[RETVAL:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[A:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[B:%.*]] = alloca { half, half }, align 2
+// AARCH64-NEXT:    [[TMP0:%.*]] = bitcast { half, half }* [[A]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[A_COERCE:%.*]], [2 x half]* [[TMP0]], align 2
+// AARCH64-NEXT:    [[TMP1:%.*]] = bitcast { half, half }* [[B]] to [2 x half]*
+// AARCH64-NEXT:    store [2 x half] [[B_COERCE:%.*]], [2 x half]* [[TMP1]], align 2
+// AARCH64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 0
+// AARCH64-NEXT:    [[A_REAL:%.*]] = load half, half* [[A_REALP]], align 2
+// AARCH64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[A]], i32 0, i32 1
+// AARCH64-NEXT:    [[A_IMAG:%.*]] = load half, half* [[A_IMAGP]], align 2
+// AARCH64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 0
+// AARCH64-NEXT:    [[B_REAL:%.*]] = load half, half* [[B_REALP]], align 2
+// AARCH64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[B]], i32 0, i32 1
+// AARCH64-NEXT:    [[B_IMAG:%.*]] = load half, half* [[B_IMAGP]], align 2
+// AARCH64-NEXT:    [[CALL:%.*]] = call { half, half } @__divhc3(half noundef [[A_REAL]], half noundef [[A_IMAG]], half noundef [[B_REAL]], half noundef [[B_IMAG]]) #[[ATTR1]]
+// AARCH64-NEXT:    [[TMP2:%.*]] = extractvalue { half, half } [[CALL]], 0
+// AARCH64-NEXT:    [[TMP3:%.*]] = extractvalue { half, half } [[CALL]], 1
+// AARCH64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 0
+// AARCH64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { half, half }, { half, half }* [[RETVAL]], i32 0, i32 1
+// AARCH64-NEXT:    store half [[TMP2]], half* [[RETVAL_REALP]], align 2
+// AARCH64-NEXT:    store half [[TMP3]], half* [[RETVAL_IMAGP]], align 2
+// AARCH64-NEXT:    [[TMP4:%.*]] = load { half, half }, { half, half }* [[RETVAL]], align 2
+// AARCH64-NEXT:    ret { half, half } [[TMP4]]
+//
+_Float16 _Complex div_float_cc(_Float16 _Complex a, _Float16 _Complex b) {
+  return a / b;
+}
Index: clang/lib/AST/ASTContext.cpp
===================================================================
--- clang/lib/AST/ASTContext.cpp
+++ clang/lib/AST/ASTContext.cpp
@@ -6808,8 +6808,8 @@
   if (Domain->isComplexType()) {
     switch (EltRank) {
     case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported");
-    case Float16Rank:
     case HalfRank: llvm_unreachable("Complex half is not supported");
+    case Float16Rank:    return getComplexType(Float16Ty);
     case Ibm128Rank:     return getComplexType(Ibm128Ty);
     case FloatRank:      return getComplexType(FloatTy);
     case DoubleRank:     return getComplexType(DoubleTy);
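For reference, the hunk above is in ASTContext::getFloatingTypeOfSizeWithinDomain, which the usual arithmetic conversions use to compute the common type when one operand is complex. A minimal sketch (not part of the patch, mirroring what the new test exercises) of the C-level behavior this enables:

  // Previously this hit the "Complex half is not supported" unreachable;
  // the common type within the complex domain is now _Float16 _Complex.
  _Float16 _Complex scale(_Float16 _Complex z, _Float16 s) {
    return z * s; // see mul_float_cr above for the generated IR
  }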