sepavloff updated this revision to Diff 357857.
sepavloff added a comment.

Rebased


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D104854/new/

https://reviews.llvm.org/D104854

Files:
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/X86/strictfp_builtins.c
  clang/test/CodeGen/aarch64-strictfp-builtins.c
  clang/test/CodeGen/strictfp_builtins.c
  llvm/docs/LangRef.rst
  llvm/include/llvm/CodeGen/ISDOpcodes.h
  llvm/include/llvm/CodeGen/TargetLowering.h
  llvm/include/llvm/IR/Intrinsics.td
  llvm/lib/Analysis/ConstantFolding.cpp
  llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
  llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
  llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
  llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
  llvm/lib/CodeGen/TargetLoweringBase.cpp
  llvm/lib/Target/X86/X86ISelLowering.cpp
  llvm/test/CodeGen/AArch64/aarch64-fpclass.ll
  llvm/test/CodeGen/PowerPC/ppc-fpclass.ll
  llvm/test/CodeGen/X86/x86-fpclass.ll
  llvm/test/Transforms/InstSimplify/ConstProp/fpclassify.ll

Index: llvm/test/Transforms/InstSimplify/ConstProp/fpclassify.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstSimplify/ConstProp/fpclassify.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define i1 @isnan_01() {
+entry:
+  %0 = tail call i1 @llvm.isnan.f32(float 0x7FF8000000000000)
+  ret i1 %0
+}
+; CHECK-LABEL: isnan_01
+; CHECK:       ret i1 true
+
+define i1 @isnan_02() {
+entry:
+  %0 = tail call i1 @llvm.isnan.f32(float 0x7FF0000000000000)
+  ret i1 %0
+}
+; CHECK-LABEL: isnan_02
+; CHECK:       ret i1 false
+
+define <4 x i1> @isnan_03() {
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f32(<4 x float><float 0x7FF8000000000000, float 0x7FF0000000000000, float 1.0, float 0xFFF8000000000000>)
+  ret <4 x i1> %0
+}
+; CHECK-LABEL: isnan_03
+; CHECK:       ret <4 x i1> <i1 true, i1 false, i1 false, i1 true>
+
+declare i1 @llvm.isnan.f32(float)
+declare <4 x i1> @llvm.isnan.v4f32(<4 x float>)
Index: llvm/test/CodeGen/X86/x86-fpclass.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/x86-fpclass.ll
@@ -0,0 +1,655 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686 | FileCheck %s -check-prefix=CHECK-32
+; RUN: llc < %s -mtriple=x86_64 | FileCheck %s -check-prefix=CHECK-64
+
+define i1 @isnan_float(float %x) {
+; CHECK-32-LABEL: isnan_float:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_float:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    ucomiss %xmm0, %xmm0
+; CHECK-64-NEXT:    setp %al
+; CHECK-64-NEXT:    retq
+; NOSSE-32-LABEL: isnan_float:
+entry:
+  %0 = tail call i1 @llvm.isnan.f32(float %x)
+  ret i1 %0
+}
+
+define i1 @isnan_double(double %x) {
+; CHECK-32-LABEL: isnan_double:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_double:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    ucomisd %xmm0, %xmm0
+; CHECK-64-NEXT:    setp %al
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call i1 @llvm.isnan.f64(double %x)
+  ret i1 %0
+}
+
+define i1 @isnan_ldouble(x86_fp80 %x) {
+; CHECK-32-LABEL: isnan_ldouble:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    fldt {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fxam
+; CHECK-32-NEXT:    fstp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    andb $69, %ah
+; CHECK-32-NEXT:    cmpb $1, %ah
+; CHECK-32-NEXT:    sete %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_ldouble:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-64-NEXT:    fxam
+; CHECK-64-NEXT:    fstp %st(0)
+; CHECK-64-NEXT:    fnstsw %ax
+; CHECK-64-NEXT:    andb $69, %ah
+; CHECK-64-NEXT:    cmpb $1, %ah
+; CHECK-64-NEXT:    sete %al
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call i1 @llvm.isnan.f80(x86_fp80 %x)
+  ret i1 %0
+}
+
+define i1 @isnan_float_strictfp(float %x) strictfp {
+; CHECK-32-LABEL: isnan_float_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-32-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; CHECK-32-NEXT:    setne %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_float_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    movd %xmm0, %eax
+; CHECK-64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; CHECK-64-NEXT:    setne %al
+; CHECK-64-NEXT:    retq
+; NOSSE-32-LABEL: isnan_float_strictfp:
+entry:
+  %0 = tail call i1 @llvm.isnan.f32(float %x)
+  ret i1 %0
+}
+
+define i1 @isnan_double_strictfp(double %x) strictfp {
+; CHECK-32-LABEL: isnan_double_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-32-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    xorl $2146435072, %eax # imm = 0x7FF00000
+; CHECK-32-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    setne %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_double_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    movq %xmm0, %rax
+; CHECK-64-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
+; CHECK-64-NEXT:    andq %rax, %rcx
+; CHECK-64-NEXT:    movabsq $9218868437227405312, %rax # imm = 0x7FF0000000000000
+; CHECK-64-NEXT:    cmpq %rax, %rcx
+; CHECK-64-NEXT:    setne %al
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call i1 @llvm.isnan.f64(double %x)
+  ret i1 %0
+}
+
+define i1 @isnan_ldouble_strictfp(x86_fp80 %x) strictfp {
+; CHECK-32-LABEL: isnan_ldouble_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    fldt {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fxam
+; CHECK-32-NEXT:    fstp %st(0)
+; CHECK-32-NEXT:    wait
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    andb $69, %ah
+; CHECK-32-NEXT:    cmpb $1, %ah
+; CHECK-32-NEXT:    sete %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_ldouble_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-64-NEXT:    fxam
+; CHECK-64-NEXT:    fstp %st(0)
+; CHECK-64-NEXT:    wait
+; CHECK-64-NEXT:    fnstsw %ax
+; CHECK-64-NEXT:    andb $69, %ah
+; CHECK-64-NEXT:    cmpb $1, %ah
+; CHECK-64-NEXT:    sete %al
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call i1 @llvm.isnan.f80(x86_fp80 %x)
+  ret i1 %0
+}
+
+define <1 x i1> @isnan_float_vec1(<1 x float> %x) {
+; CHECK-32-LABEL: isnan_float_vec1:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_float_vec1:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    ucomiss %xmm0, %xmm0
+; CHECK-64-NEXT:    setp %al
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f32(<1 x float> %x)
+  ret <1 x i1> %0
+}
+
+define <1 x i1> @isnan_double_vec1(<1 x double> %x) {
+; CHECK-32-LABEL: isnan_double_vec1:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_double_vec1:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    ucomisd %xmm0, %xmm0
+; CHECK-64-NEXT:    setp %al
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f64(<1 x double> %x)
+  ret <1 x i1> %0
+}
+
+define <2 x i1> @isnan_float_vec2(<2 x float> %x) {
+; CHECK-32-LABEL: isnan_float_vec2:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %cl
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %dl
+; CHECK-32-NEXT:    movl %ecx, %eax
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_float_vec2:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    xorps %xmm1, %xmm1
+; CHECK-64-NEXT:    cmpunordps %xmm0, %xmm1
+; CHECK-64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1,1,3]
+; CHECK-64-NEXT:    movaps %xmm1, %xmm0
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_double_vec2(<2 x double> %x) {
+; CHECK-32-LABEL: isnan_double_vec2:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %cl
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %dl
+; CHECK-32-NEXT:    movl %ecx, %eax
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_double_vec2:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    xorpd %xmm1, %xmm1
+; CHECK-64-NEXT:    cmpunordpd %xmm1, %xmm0
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
+  ret <2 x i1> %0
+}
+
+define <4 x i1> @isnan_float_vec4(<4 x float> %x) {
+; CHECK-32-LABEL: isnan_float_vec4:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    flds {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %dl
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %dh
+; CHECK-32-NEXT:    addb %dh, %dh
+; CHECK-32-NEXT:    orb %dl, %dh
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %dl
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %al
+; CHECK-32-NEXT:    addb %al, %al
+; CHECK-32-NEXT:    orb %dl, %al
+; CHECK-32-NEXT:    shlb $2, %al
+; CHECK-32-NEXT:    orb %dh, %al
+; CHECK-32-NEXT:    movb %al, (%ecx)
+; CHECK-32-NEXT:    movl %ecx, %eax
+; CHECK-32-NEXT:    retl $4
+;
+; CHECK-64-LABEL: isnan_float_vec4:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    xorps %xmm1, %xmm1
+; CHECK-64-NEXT:    cmpunordps %xmm1, %xmm0
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f32(<4 x float> %x)
+  ret <4 x i1> %0
+}
+
+define <4 x i1> @isnan_double_vec4(<4 x double> %x) {
+; CHECK-32-LABEL: isnan_double_vec4:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %dl
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %dh
+; CHECK-32-NEXT:    addb %dh, %dh
+; CHECK-32-NEXT:    orb %dl, %dh
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %dl
+; CHECK-32-NEXT:    fucomp %st(0)
+; CHECK-32-NEXT:    fnstsw %ax
+; CHECK-32-NEXT:    # kill: def $ah killed $ah killed $ax
+; CHECK-32-NEXT:    sahf
+; CHECK-32-NEXT:    setp %al
+; CHECK-32-NEXT:    addb %al, %al
+; CHECK-32-NEXT:    orb %dl, %al
+; CHECK-32-NEXT:    shlb $2, %al
+; CHECK-32-NEXT:    orb %dh, %al
+; CHECK-32-NEXT:    movb %al, (%ecx)
+; CHECK-32-NEXT:    movl %ecx, %eax
+; CHECK-32-NEXT:    retl $4
+;
+; CHECK-64-LABEL: isnan_double_vec4:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    xorpd %xmm2, %xmm2
+; CHECK-64-NEXT:    cmpunordpd %xmm2, %xmm1
+; CHECK-64-NEXT:    cmpunordpd %xmm2, %xmm0
+; CHECK-64-NEXT:    packssdw %xmm1, %xmm0
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f64(<4 x double> %x)
+  ret <4 x i1> %0
+}
+
+
+define <1 x i1> @isnan_float_vec1_strictfp(<1 x float> %x) strictfp {
+; CHECK-32-LABEL: isnan_float_vec1_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-32-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; CHECK-32-NEXT:    setne %al
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_float_vec1_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    movd %xmm0, %eax
+; CHECK-64-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-64-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; CHECK-64-NEXT:    setne %al
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f32(<1 x float> %x)
+  ret <1 x i1> %0
+}
+
+define <1 x i1> @isnan_double_vec1_strictfp(<1 x double> %x) strictfp {
+; CHECK-32-LABEL: isnan_double_vec1_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    pushl %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-32-NEXT:    .cfi_offset %ebp, -8
+; CHECK-32-NEXT:    movl %esp, %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa_register %ebp
+; CHECK-32-NEXT:    andl $-8, %esp
+; CHECK-32-NEXT:    subl $8, %esp
+; CHECK-32-NEXT:    fldl 8(%ebp)
+; CHECK-32-NEXT:    fstpl (%esp)
+; CHECK-32-NEXT:    wait
+; CHECK-32-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-32-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    xorl %ecx, %ecx
+; CHECK-32-NEXT:    subl (%esp), %ecx
+; CHECK-32-NEXT:    movl $2146435072, %edx # imm = 0x7FF00000
+; CHECK-32-NEXT:    sbbl %eax, %edx
+; CHECK-32-NEXT:    orl %ecx, %edx
+; CHECK-32-NEXT:    setne %al
+; CHECK-32-NEXT:    movl %ebp, %esp
+; CHECK-32-NEXT:    popl %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa %esp, 4
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_double_vec1_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    movq %xmm0, %rax
+; CHECK-64-NEXT:    movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
+; CHECK-64-NEXT:    andq %rax, %rcx
+; CHECK-64-NEXT:    movabsq $9218868437227405312, %rax # imm = 0x7FF0000000000000
+; CHECK-64-NEXT:    cmpq %rax, %rcx
+; CHECK-64-NEXT:    setne %al
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f64(<1 x double> %x)
+  ret <1 x i1> %0
+}
+
+define <2 x i1> @isnan_float_vec2_strictfp(<2 x float> %x) strictfp {
+; CHECK-32-LABEL: isnan_float_vec2_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    movl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    andl %ecx, %eax
+; CHECK-32-NEXT:    cmpl $2139095040, %eax # imm = 0x7F800000
+; CHECK-32-NEXT:    setne %al
+; CHECK-32-NEXT:    andl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    cmpl $2139095040, %ecx # imm = 0x7F800000
+; CHECK-32-NEXT:    setne %dl
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_float_vec2_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; CHECK-64-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-64-NEXT:    movdqa {{.*#+}} xmm1 = <2139095040,2139095040,2139095040,u>
+; CHECK-64-NEXT:    psubd %xmm0, %xmm1
+; CHECK-64-NEXT:    pxor %xmm2, %xmm2
+; CHECK-64-NEXT:    pcmpeqd %xmm1, %xmm2
+; CHECK-64-NEXT:    pcmpeqd %xmm0, %xmm0
+; CHECK-64-NEXT:    pxor %xmm2, %xmm0
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_double_vec2_strictfp(<2 x double> %x) strictfp {
+; CHECK-32-LABEL: isnan_double_vec2_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    pushl %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-32-NEXT:    .cfi_offset %ebp, -8
+; CHECK-32-NEXT:    movl %esp, %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa_register %ebp
+; CHECK-32-NEXT:    pushl %edi
+; CHECK-32-NEXT:    pushl %esi
+; CHECK-32-NEXT:    andl $-8, %esp
+; CHECK-32-NEXT:    subl $16, %esp
+; CHECK-32-NEXT:    .cfi_offset %esi, -16
+; CHECK-32-NEXT:    .cfi_offset %edi, -12
+; CHECK-32-NEXT:    fldl 8(%ebp)
+; CHECK-32-NEXT:    fstpl (%esp)
+; CHECK-32-NEXT:    fldl 16(%ebp)
+; CHECK-32-NEXT:    fstpl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    wait
+; CHECK-32-NEXT:    movl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    andl %ecx, %eax
+; CHECK-32-NEXT:    xorl %edx, %edx
+; CHECK-32-NEXT:    xorl %esi, %esi
+; CHECK-32-NEXT:    subl (%esp), %esi
+; CHECK-32-NEXT:    movl $2146435072, %edi # imm = 0x7FF00000
+; CHECK-32-NEXT:    sbbl %eax, %edi
+; CHECK-32-NEXT:    orl %esi, %edi
+; CHECK-32-NEXT:    movl $2146435072, %esi # imm = 0x7FF00000
+; CHECK-32-NEXT:    setne %al
+; CHECK-32-NEXT:    andl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    subl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT:    sbbl %ecx, %esi
+; CHECK-32-NEXT:    orl %edx, %esi
+; CHECK-32-NEXT:    setne %dl
+; CHECK-32-NEXT:    leal -8(%ebp), %esp
+; CHECK-32-NEXT:    popl %esi
+; CHECK-32-NEXT:    popl %edi
+; CHECK-32-NEXT:    popl %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa %esp, 4
+; CHECK-32-NEXT:    retl
+;
+; CHECK-64-LABEL: isnan_double_vec2_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-64-NEXT:    movdqa {{.*#+}} xmm1 = [9218868437227405312,9218868437227405312]
+; CHECK-64-NEXT:    psubq %xmm0, %xmm1
+; CHECK-64-NEXT:    pxor %xmm0, %xmm0
+; CHECK-64-NEXT:    pcmpeqd %xmm1, %xmm0
+; CHECK-64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
+; CHECK-64-NEXT:    pand %xmm0, %xmm1
+; CHECK-64-NEXT:    pcmpeqd %xmm0, %xmm0
+; CHECK-64-NEXT:    pxor %xmm1, %xmm0
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
+  ret <2 x i1> %0
+}
+
+define <4 x i1> @isnan_float_vec4_strictfp(<4 x float> %x) strictfp {
+; CHECK-32-LABEL: isnan_float_vec4_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    pushl %esi
+; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-32-NEXT:    .cfi_offset %esi, -8
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    movl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT:    andl %ecx, %edx
+; CHECK-32-NEXT:    cmpl $2139095040, %edx # imm = 0x7F800000
+; CHECK-32-NEXT:    setne %dl
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-32-NEXT:    andl %ecx, %esi
+; CHECK-32-NEXT:    cmpl $2139095040, %esi # imm = 0x7F800000
+; CHECK-32-NEXT:    setne %dh
+; CHECK-32-NEXT:    addb %dh, %dh
+; CHECK-32-NEXT:    orb %dl, %dh
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-32-NEXT:    andl %ecx, %esi
+; CHECK-32-NEXT:    cmpl $2139095040, %esi # imm = 0x7F800000
+; CHECK-32-NEXT:    setne %dl
+; CHECK-32-NEXT:    andl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    cmpl $2139095040, %ecx # imm = 0x7F800000
+; CHECK-32-NEXT:    setne %cl
+; CHECK-32-NEXT:    addb %cl, %cl
+; CHECK-32-NEXT:    orb %dl, %cl
+; CHECK-32-NEXT:    shlb $2, %cl
+; CHECK-32-NEXT:    orb %dh, %cl
+; CHECK-32-NEXT:    movb %cl, (%eax)
+; CHECK-32-NEXT:    popl %esi
+; CHECK-32-NEXT:    .cfi_def_cfa_offset 4
+; CHECK-32-NEXT:    retl $4
+;
+; CHECK-64-LABEL: isnan_float_vec4_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-64-NEXT:    movdqa {{.*#+}} xmm1 = [2139095040,2139095040,2139095040,2139095040]
+; CHECK-64-NEXT:    psubd %xmm0, %xmm1
+; CHECK-64-NEXT:    pxor %xmm2, %xmm2
+; CHECK-64-NEXT:    pcmpeqd %xmm1, %xmm2
+; CHECK-64-NEXT:    pcmpeqd %xmm0, %xmm0
+; CHECK-64-NEXT:    pxor %xmm2, %xmm0
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f32(<4 x float> %x)
+  ret <4 x i1> %0
+}
+
+define <4 x i1> @isnan_double_vec4_strictfp(<4 x double> %x) strictfp {
+; CHECK-32-LABEL: isnan_double_vec4_strictfp:
+; CHECK-32:       # %bb.0: # %entry
+; CHECK-32-NEXT:    pushl %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-32-NEXT:    .cfi_offset %ebp, -8
+; CHECK-32-NEXT:    movl %esp, %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa_register %ebp
+; CHECK-32-NEXT:    pushl %ebx
+; CHECK-32-NEXT:    pushl %edi
+; CHECK-32-NEXT:    pushl %esi
+; CHECK-32-NEXT:    andl $-8, %esp
+; CHECK-32-NEXT:    subl $40, %esp
+; CHECK-32-NEXT:    .cfi_offset %esi, -20
+; CHECK-32-NEXT:    .cfi_offset %edi, -16
+; CHECK-32-NEXT:    .cfi_offset %ebx, -12
+; CHECK-32-NEXT:    fldl 12(%ebp)
+; CHECK-32-NEXT:    fstpl (%esp)
+; CHECK-32-NEXT:    fldl 20(%ebp)
+; CHECK-32-NEXT:    fstpl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fldl 28(%ebp)
+; CHECK-32-NEXT:    fstpl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    fldl 36(%ebp)
+; CHECK-32-NEXT:    fstpl {{[0-9]+}}(%esp)
+; CHECK-32-NEXT:    wait
+; CHECK-32-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT:    andl %eax, %edx
+; CHECK-32-NEXT:    xorl %ecx, %ecx
+; CHECK-32-NEXT:    xorl %esi, %esi
+; CHECK-32-NEXT:    subl (%esp), %esi
+; CHECK-32-NEXT:    movl $2146435072, %edi # imm = 0x7FF00000
+; CHECK-32-NEXT:    sbbl %edx, %edi
+; CHECK-32-NEXT:    orl %esi, %edi
+; CHECK-32-NEXT:    setne %dl
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-32-NEXT:    andl %eax, %esi
+; CHECK-32-NEXT:    xorl %edi, %edi
+; CHECK-32-NEXT:    subl {{[0-9]+}}(%esp), %edi
+; CHECK-32-NEXT:    movl $2146435072, %ebx # imm = 0x7FF00000
+; CHECK-32-NEXT:    sbbl %esi, %ebx
+; CHECK-32-NEXT:    orl %edi, %ebx
+; CHECK-32-NEXT:    setne %dh
+; CHECK-32-NEXT:    addb %dh, %dh
+; CHECK-32-NEXT:    orb %dl, %dh
+; CHECK-32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-32-NEXT:    andl %eax, %esi
+; CHECK-32-NEXT:    xorl %edi, %edi
+; CHECK-32-NEXT:    subl {{[0-9]+}}(%esp), %edi
+; CHECK-32-NEXT:    movl $2146435072, %ebx # imm = 0x7FF00000
+; CHECK-32-NEXT:    sbbl %esi, %ebx
+; CHECK-32-NEXT:    orl %edi, %ebx
+; CHECK-32-NEXT:    movl $2146435072, %esi # imm = 0x7FF00000
+; CHECK-32-NEXT:    setne %dl
+; CHECK-32-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT:    sbbl %eax, %esi
+; CHECK-32-NEXT:    orl %ecx, %esi
+; CHECK-32-NEXT:    setne %cl
+; CHECK-32-NEXT:    addb %cl, %cl
+; CHECK-32-NEXT:    orb %dl, %cl
+; CHECK-32-NEXT:    shlb $2, %cl
+; CHECK-32-NEXT:    orb %dh, %cl
+; CHECK-32-NEXT:    movl 8(%ebp), %eax
+; CHECK-32-NEXT:    movb %cl, (%eax)
+; CHECK-32-NEXT:    leal -12(%ebp), %esp
+; CHECK-32-NEXT:    popl %esi
+; CHECK-32-NEXT:    popl %edi
+; CHECK-32-NEXT:    popl %ebx
+; CHECK-32-NEXT:    popl %ebp
+; CHECK-32-NEXT:    .cfi_def_cfa %esp, 4
+; CHECK-32-NEXT:    retl $4
+;
+; CHECK-64-LABEL: isnan_double_vec4_strictfp:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775807,9223372036854775807]
+; CHECK-64-NEXT:    pand %xmm2, %xmm0
+; CHECK-64-NEXT:    movdqa {{.*#+}} xmm3 = [9218868437227405312,9218868437227405312]
+; CHECK-64-NEXT:    movdqa %xmm3, %xmm4
+; CHECK-64-NEXT:    psubq %xmm0, %xmm4
+; CHECK-64-NEXT:    pand %xmm2, %xmm1
+; CHECK-64-NEXT:    psubq %xmm1, %xmm3
+; CHECK-64-NEXT:    pxor %xmm0, %xmm0
+; CHECK-64-NEXT:    pcmpeqd %xmm0, %xmm3
+; CHECK-64-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,0,3,2]
+; CHECK-64-NEXT:    pand %xmm3, %xmm1
+; CHECK-64-NEXT:    pcmpeqd %xmm2, %xmm2
+; CHECK-64-NEXT:    pxor %xmm2, %xmm1
+; CHECK-64-NEXT:    pcmpeqd %xmm0, %xmm4
+; CHECK-64-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[1,0,3,2]
+; CHECK-64-NEXT:    pand %xmm4, %xmm0
+; CHECK-64-NEXT:    pxor %xmm2, %xmm0
+; CHECK-64-NEXT:    packssdw %xmm1, %xmm0
+; CHECK-64-NEXT:    retq
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f64(<4 x double> %x)
+  ret <4 x i1> %0
+}
+
+
+declare i1 @llvm.isnan.f32(float)
+declare i1 @llvm.isnan.f64(double)
+declare i1 @llvm.isnan.f80(x86_fp80)
+declare <1 x i1> @llvm.isnan.v1f32(<1 x float>)
+declare <1 x i1> @llvm.isnan.v1f64(<1 x double>)
+declare <2 x i1> @llvm.isnan.v2f32(<2 x float>)
+declare <2 x i1> @llvm.isnan.v2f64(<2 x double>)
+declare <4 x i1> @llvm.isnan.v4f32(<4 x float>)
+declare <4 x i1> @llvm.isnan.v4f64(<4 x double>)
Index: llvm/test/CodeGen/PowerPC/ppc-fpclass.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/ppc-fpclass.ll
@@ -0,0 +1,387 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpcle-unknown-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
+
+
+define i1 @isnan_float(float %x) {
+; CHECK-LABEL: isnan_float:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:    fcmpu 0, 1, 1
+; CHECK-NEXT:    li 4, 1
+; CHECK-NEXT:    bc 12, 3, .LBB0_1
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB0_1: # %entry
+; CHECK-NEXT:    addi 3, 4, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call i1 @llvm.isnan.f32(float %x)
+  ret i1 %0
+}
+
+define i1 @isnan_double(double %x) {
+; CHECK-LABEL: isnan_double:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:    fcmpu 0, 1, 1
+; CHECK-NEXT:    li 4, 1
+; CHECK-NEXT:    bc 12, 3, .LBB1_1
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB1_1: # %entry
+; CHECK-NEXT:    addi 3, 4, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call i1 @llvm.isnan.f64(double %x)
+  ret i1 %0
+}
+
+define i1 @isnan_ldouble(ppc_fp128 %x) {
+; CHECK-LABEL: isnan_ldouble:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fcmpu 0, 1, 1
+; CHECK-NEXT:    fcmpu 1, 2, 2
+; CHECK-NEXT:    crandc 20, 7, 3
+; CHECK-NEXT:    li 3, 1
+; CHECK-NEXT:    crnor 20, 3, 20
+; CHECK-NEXT:    bc 12, 20, .LBB2_1
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB2_1: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call i1 @llvm.isnan.ppcf128(ppc_fp128 %x)
+  ret i1 %0
+}
+
+
+define i1 @isnan_float_strictfp(float %x) strictfp {
+; CHECK-LABEL: isnan_float_strictfp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    stwu 1, -16(1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stfs 1, 12(1)
+; CHECK-NEXT:    li 3, 1
+; CHECK-NEXT:    lwz 4, 12(1)
+; CHECK-NEXT:    clrlwi 4, 4, 1
+; CHECK-NEXT:    xoris 4, 4, 32640
+; CHECK-NEXT:    cmplwi 4, 0
+; CHECK-NEXT:    bc 12, 2, .LBB3_1
+; CHECK-NEXT:    b .LBB3_2
+; CHECK-NEXT:  .LBB3_1: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:  .LBB3_2: # %entry
+; CHECK-NEXT:    addi 1, 1, 16
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call i1 @llvm.isnan.f32(float %x)
+  ret i1 %0
+}
+
+define i1 @isnan_double_strictfp(double %x) strictfp {
+; CHECK-LABEL: isnan_double_strictfp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    stwu 1, -16(1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stfd 1, 8(1)
+; CHECK-NEXT:    lwz 3, 12(1)
+; CHECK-NEXT:    lwz 4, 8(1)
+; CHECK-NEXT:    clrlwi 3, 3, 1
+; CHECK-NEXT:    xoris 3, 3, 32752
+; CHECK-NEXT:    or 3, 4, 3
+; CHECK-NEXT:    cntlzw 3, 3
+; CHECK-NEXT:    not 3, 3
+; CHECK-NEXT:    rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT:    addi 1, 1, 16
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call i1 @llvm.isnan.f64(double %x)
+  ret i1 %0
+}
+
+define i1 @isnan_ldouble_strictfp(ppc_fp128 %x) strictfp {
+; CHECK-LABEL: isnan_ldouble_strictfp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    stwu 1, -32(1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    stfd 1, 16(1)
+; CHECK-NEXT:    stfd 2, 24(1)
+; CHECK-NEXT:    lwz 3, 28(1)
+; CHECK-NEXT:    lwz 4, 24(1)
+; CHECK-NEXT:    lwz 5, 16(1)
+; CHECK-NEXT:    clrlwi 3, 3, 1
+; CHECK-NEXT:    lwz 6, 20(1)
+; CHECK-NEXT:    or 4, 5, 4
+; CHECK-NEXT:    xoris 5, 6, 32752
+; CHECK-NEXT:    or 3, 5, 3
+; CHECK-NEXT:    or 3, 4, 3
+; CHECK-NEXT:    cntlzw 3, 3
+; CHECK-NEXT:    not 3, 3
+; CHECK-NEXT:    rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT:    addi 1, 1, 32
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call i1 @llvm.isnan.ppcf128(ppc_fp128 %x)
+  ret i1 %0
+}
+
+
+define <1 x i1> @isnan_float_vec1(<1 x float> %x) {
+; CHECK-LABEL: isnan_float_vec1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:    fcmpu 0, 1, 1
+; CHECK-NEXT:    li 4, 1
+; CHECK-NEXT:    bc 12, 3, .LBB6_1
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB6_1: # %entry
+; CHECK-NEXT:    addi 3, 4, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f32(<1 x float> %x)
+  ret <1 x i1> %0
+}
+
+define <1 x i1> @isnan_double_vec1(<1 x double> %x) {
+; CHECK-LABEL: isnan_double_vec1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:    fcmpu 0, 1, 1
+; CHECK-NEXT:    li 4, 1
+; CHECK-NEXT:    bc 12, 3, .LBB7_1
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB7_1: # %entry
+; CHECK-NEXT:    addi 3, 4, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f64(<1 x double> %x)
+  ret <1 x i1> %0
+}
+
+define <1 x i1> @isnan_ldouble_vec1(<1 x ppc_fp128> %x) {
+; CHECK-LABEL: isnan_ldouble_vec1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lis 3, .LCPI8_0@ha
+; CHECK-NEXT:    lfs 0, .LCPI8_0@l(3)
+; CHECK-NEXT:    fcmpu 0, 2, 2
+; CHECK-NEXT:    li 3, 1
+; CHECK-NEXT:    fcmpu 1, 1, 0
+; CHECK-NEXT:    crand 20, 6, 3
+; CHECK-NEXT:    fcmpu 0, 1, 1
+; CHECK-NEXT:    crnor 20, 3, 20
+; CHECK-NEXT:    bc 12, 20, .LBB8_1
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB8_1: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1ppcf128(<1 x ppc_fp128> %x)
+  ret <1 x i1> %0
+}
+
+
+define <2 x i1> @isnan_float_vec2(<2 x float> %x) {
+; CHECK-LABEL: isnan_float_vec2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 4, 0
+; CHECK-NEXT:    fcmpu 0, 2, 2
+; CHECK-NEXT:    fcmpu 1, 1, 1
+; CHECK-NEXT:    li 5, 1
+; CHECK-NEXT:    bc 12, 7, .LBB9_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    ori 3, 4, 0
+; CHECK-NEXT:    b .LBB9_3
+; CHECK-NEXT:  .LBB9_2: # %entry
+; CHECK-NEXT:    addi 3, 5, 0
+; CHECK-NEXT:  .LBB9_3: # %entry
+; CHECK-NEXT:    bc 12, 3, .LBB9_4
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB9_4: # %entry
+; CHECK-NEXT:    addi 4, 5, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_double_vec2(<2 x double> %x) {
+; CHECK-LABEL: isnan_double_vec2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li 4, 0
+; CHECK-NEXT:    fcmpu 0, 2, 2
+; CHECK-NEXT:    fcmpu 1, 1, 1
+; CHECK-NEXT:    li 5, 1
+; CHECK-NEXT:    bc 12, 7, .LBB10_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    ori 3, 4, 0
+; CHECK-NEXT:    b .LBB10_3
+; CHECK-NEXT:  .LBB10_2: # %entry
+; CHECK-NEXT:    addi 3, 5, 0
+; CHECK-NEXT:  .LBB10_3: # %entry
+; CHECK-NEXT:    bc 12, 3, .LBB10_4
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB10_4: # %entry
+; CHECK-NEXT:    addi 4, 5, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_ldouble_vec2(<2 x ppc_fp128> %x) {
+; CHECK-LABEL: isnan_ldouble_vec2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lis 3, .LCPI11_0@ha
+; CHECK-NEXT:    lfs 0, .LCPI11_0@l(3)
+; CHECK-NEXT:    fcmpu 0, 4, 4
+; CHECK-NEXT:    li 4, 1
+; CHECK-NEXT:    fcmpu 1, 3, 0
+; CHECK-NEXT:    crand 20, 6, 3
+; CHECK-NEXT:    fcmpu 0, 2, 2
+; CHECK-NEXT:    fcmpu 1, 1, 0
+; CHECK-NEXT:    crand 21, 6, 3
+; CHECK-NEXT:    fcmpu 0, 3, 3
+; CHECK-NEXT:    crnor 20, 3, 20
+; CHECK-NEXT:    fcmpu 0, 1, 1
+; CHECK-NEXT:    crnor 21, 3, 21
+; CHECK-NEXT:    bc 12, 21, .LBB11_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    ori 3, 4, 0
+; CHECK-NEXT:    b .LBB11_3
+; CHECK-NEXT:  .LBB11_2: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:  .LBB11_3: # %entry
+; CHECK-NEXT:    bc 12, 20, .LBB11_4
+; CHECK-NEXT:    blr
+; CHECK-NEXT:  .LBB11_4: # %entry
+; CHECK-NEXT:    li 4, 0
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2ppcf128(<2 x ppc_fp128> %x)
+  ret <2 x i1> %0
+}
+
+
+define <2 x i1> @isnan_float_vec2_strictfp(<2 x float> %x) strictfp {
+; CHECK-LABEL: isnan_float_vec2_strictfp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    stwu 1, -16(1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    stfs 1, 8(1)
+; CHECK-NEXT:    li 4, 1
+; CHECK-NEXT:    stfs 2, 12(1)
+; CHECK-NEXT:    lwz 3, 12(1)
+; CHECK-NEXT:    lwz 5, 8(1)
+; CHECK-NEXT:    clrlwi 3, 3, 1
+; CHECK-NEXT:    xoris 3, 3, 32640
+; CHECK-NEXT:    clrlwi 5, 5, 1
+; CHECK-NEXT:    xoris 5, 5, 32640
+; CHECK-NEXT:    cmplwi 5, 0
+; CHECK-NEXT:    cmplwi 1, 3, 0
+; CHECK-NEXT:    bc 12, 2, .LBB12_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    ori 3, 4, 0
+; CHECK-NEXT:    b .LBB12_3
+; CHECK-NEXT:  .LBB12_2: # %entry
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:  .LBB12_3: # %entry
+; CHECK-NEXT:    bc 12, 6, .LBB12_4
+; CHECK-NEXT:    b .LBB12_5
+; CHECK-NEXT:  .LBB12_4: # %entry
+; CHECK-NEXT:    li 4, 0
+; CHECK-NEXT:  .LBB12_5: # %entry
+; CHECK-NEXT:    addi 1, 1, 16
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_double_vec2_strictfp(<2 x double> %x) strictfp {
+; CHECK-LABEL: isnan_double_vec2_strictfp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    stwu 1, -32(1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    stfd 2, 24(1)
+; CHECK-NEXT:    lis 3, 32752
+; CHECK-NEXT:    lwz 5, 28(1)
+; CHECK-NEXT:    lwz 6, 24(1)
+; CHECK-NEXT:    stfd 1, 16(1)
+; CHECK-NEXT:    clrlwi 5, 5, 1
+; CHECK-NEXT:    lwz 4, 20(1)
+; CHECK-NEXT:    subfic 6, 6, 0
+; CHECK-NEXT:    lwz 7, 16(1)
+; CHECK-NEXT:    subfe 5, 5, 3
+; CHECK-NEXT:    clrlwi 4, 4, 1
+; CHECK-NEXT:    or 5, 6, 5
+; CHECK-NEXT:    subfic 6, 7, 0
+; CHECK-NEXT:    subfe 3, 4, 3
+; CHECK-NEXT:    or 3, 6, 3
+; CHECK-NEXT:    cntlzw 4, 5
+; CHECK-NEXT:    cntlzw 3, 3
+; CHECK-NEXT:    not 4, 4
+; CHECK-NEXT:    not 3, 3
+; CHECK-NEXT:    rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT:    rlwinm 4, 4, 27, 31, 31
+; CHECK-NEXT:    addi 1, 1, 32
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_ldouble_vec2_strictfp(<2 x ppc_fp128> %x) strictfp {
+; CHECK-LABEL: isnan_ldouble_vec2_strictfp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    stwu 1, -48(1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    stfd 3, 32(1)
+; CHECK-NEXT:    lis 3, 32752
+; CHECK-NEXT:    lwz 8, 32(1)
+; CHECK-NEXT:    stfd 4, 40(1)
+; CHECK-NEXT:    lwz 10, 36(1)
+; CHECK-NEXT:    subfic 8, 8, 0
+; CHECK-NEXT:    lwz 9, 44(1)
+; CHECK-NEXT:    lwz 11, 40(1)
+; CHECK-NEXT:    subfe 10, 10, 3
+; CHECK-NEXT:    stfd 1, 16(1)
+; CHECK-NEXT:    clrlwi 9, 9, 1
+; CHECK-NEXT:    lwz 4, 16(1)
+; CHECK-NEXT:    subfze 11, 11
+; CHECK-NEXT:    stfd 2, 24(1)
+; CHECK-NEXT:    subfze 9, 9
+; CHECK-NEXT:    lwz 5, 20(1)
+; CHECK-NEXT:    subfic 4, 4, 0
+; CHECK-NEXT:    lwz 6, 24(1)
+; CHECK-NEXT:    or 8, 8, 11
+; CHECK-NEXT:    lwz 7, 28(1)
+; CHECK-NEXT:    subfe 3, 5, 3
+; CHECK-NEXT:    subfze 5, 6
+; CHECK-NEXT:    or 9, 10, 9
+; CHECK-NEXT:    clrlwi 7, 7, 1
+; CHECK-NEXT:    subfze 7, 7
+; CHECK-NEXT:    or 4, 4, 5
+; CHECK-NEXT:    or 3, 3, 7
+; CHECK-NEXT:    or 8, 8, 9
+; CHECK-NEXT:    or 3, 4, 3
+; CHECK-NEXT:    cntlzw 6, 8
+; CHECK-NEXT:    cntlzw 3, 3
+; CHECK-NEXT:    not 5, 6
+; CHECK-NEXT:    not 3, 3
+; CHECK-NEXT:    rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT:    rlwinm 4, 5, 27, 31, 31
+; CHECK-NEXT:    addi 1, 1, 48
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2ppcf128(<2 x ppc_fp128> %x)
+  ret <2 x i1> %0
+}
+
+
+declare i1 @llvm.isnan.f32(float)
+declare i1 @llvm.isnan.f64(double)
+declare i1 @llvm.isnan.ppcf128(ppc_fp128)
+declare <1 x i1> @llvm.isnan.v1f32(<1 x float>)
+declare <1 x i1> @llvm.isnan.v1f64(<1 x double>)
+declare <1 x i1> @llvm.isnan.v1ppcf128(<1 x ppc_fp128>)
+declare <2 x i1> @llvm.isnan.v2f32(<2 x float>)
+declare <2 x i1> @llvm.isnan.v2f64(<2 x double>)
+declare <2 x i1> @llvm.isnan.v2ppcf128(<2 x ppc_fp128>)
Index: llvm/test/CodeGen/AArch64/aarch64-fpclass.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/aarch64-fpclass.ll
@@ -0,0 +1,507 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+bf16 | FileCheck %s -check-prefix=CHECK
+
+define i1 @isnan_half(half %x) {
+; CHECK-LABEL: isnan_half:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    fcmp s0, s0
+; CHECK-NEXT:    cset w0, vs
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.f16(half %x)
+  ret i1 %0
+}
+
+define i1 @isnan_float(float %x) {
+; CHECK-LABEL: isnan_float:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmp s0, s0
+; CHECK-NEXT:    cset w0, vs
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.f32(float %x)
+  ret i1 %0
+}
+
+define i1 @isnan_double(double %x) {
+; CHECK-LABEL: isnan_double:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmp d0, d0
+; CHECK-NEXT:    cset w0, vs
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.f64(double %x)
+  ret i1 %0
+}
+
+define i1 @isnan_ldouble(fp128 %x) {
+; CHECK-LABEL: isnan_ldouble:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    bl __unordtf2
+; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.f128(fp128 %x)
+  ret i1 %0
+}
+
+
+define i1 @isnan_half_strictfp(half %x) strictfp {
+; CHECK-LABEL: isnan_half_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $h0 killed $h0 def $s0
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w8, w8, #0x7fff
+; CHECK-NEXT:    mov w9, #31744
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.f16(half %x)
+  ret i1 %0
+}
+
+define i1 @isnan_bfloat_strictfp(bfloat %x) strictfp {
+; CHECK-LABEL: isnan_bfloat_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $h0 killed $h0 def $s0
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w8, w8, #0x7fff
+; CHECK-NEXT:    mov w9, #32640
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.bf16(bfloat %x)
+  ret i1 %0
+}
+
+define i1 @isnan_float_strictfp(float %x) strictfp {
+; CHECK-LABEL: isnan_float_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    and w8, w8, #0x7fffffff
+; CHECK-NEXT:    mov w9, #2139095040
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.f32(float %x)
+  ret i1 %0
+}
+
+define i1 @isnan_double_strictfp(double %x) strictfp {
+; CHECK-LABEL: isnan_double_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    and x8, x8, #0x7fffffffffffffff
+; CHECK-NEXT:    mov x9, #9218868437227405312
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.f64(double %x)
+  ret i1 %0
+}
+
+define i1 @isnan_ldouble_strictfp(fp128 %x) strictfp {
+; CHECK-LABEL: isnan_ldouble_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    str q0, [sp, #-16]!
+; CHECK-NEXT:    ldp x9, x8, [sp], #16
+; CHECK-NEXT:    and x8, x8, #0x7fffffffffffffff
+; CHECK-NEXT:    eor x8, x8, #0x7fff000000000000
+; CHECK-NEXT:    orr x8, x9, x8
+; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i1 @llvm.isnan.f128(fp128 %x)
+  ret i1 %0
+}
+
+
+define <1 x i1> @isnan_half_vec1(<1 x half> %x) {
+; CHECK-LABEL: isnan_half_vec1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    fcmp s0, s0
+; CHECK-NEXT:    cset w0, vs
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f16(<1 x half> %x)
+  ret <1 x i1> %0
+}
+
+define <1 x i1> @isnan_float_vec1(<1 x float> %x) {
+; CHECK-LABEL: isnan_float_vec1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fcmp s0, s0
+; CHECK-NEXT:    cset w0, vs
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f32(<1 x float> %x)
+  ret <1 x i1> %0
+}
+
+define <1 x i1> @isnan_double_vec1(<1 x double> %x) {
+; CHECK-LABEL: isnan_double_vec1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmp d0, d0
+; CHECK-NEXT:    cset w0, vs
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f64(<1 x double> %x)
+  ret <1 x i1> %0
+}
+
+define <1 x i1> @isnan_ldouble_vec1(<1 x fp128> %x) {
+; CHECK-LABEL: isnan_ldouble_vec1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    bl __unordtf2
+; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <1 x i1> @llvm.isnan.v1f128(<1 x fp128> %x)
+  ret <1 x i1> %0
+}
+
+
+define <2 x i1> @isnan_half_vec2(<2 x half> %x) {
+; CHECK-LABEL: isnan_half_vec2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    fcmge v1.4s, v0.4s, #0.0
+; CHECK-NEXT:    fcmlt v0.4s, v0.4s, #0.0
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    umov w8, v0.h[0]
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    umov w8, v0.h[1]
+; CHECK-NEXT:    mov v1.s[1], w8
+; CHECK-NEXT:    shl v0.2s, v1.2s, #16
+; CHECK-NEXT:    sshr v0.2s, v0.2s, #16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f16(<2 x half> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_float_vec2(<2 x float> %x) {
+; CHECK-LABEL: isnan_float_vec2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmge v1.2s, v0.2s, #0.0
+; CHECK-NEXT:    fcmlt v0.2s, v0.2s, #0.0
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_double_vec2(<2 x double> %x) {
+; CHECK-LABEL: isnan_double_vec2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmge v1.2d, v0.2d, #0.0
+; CHECK-NEXT:    fcmlt v0.2d, v0.2d, #0.0
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_ldouble_vec2(<2 x fp128> %x) {
+; CHECK-LABEL: isnan_ldouble_vec2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    bl __unordtf2
+; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    dup v0.2d, x8
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    bl __unordtf2
+; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    dup v0.2d, x8
+; CHECK-NEXT:    zip1 v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f128(<2 x fp128> %x)
+  ret <2 x i1> %0
+}
+
+
+define <2 x i1> @isnan_half_vec2_strictfp(<2 x half> %x) strictfp {
+; CHECK-LABEL: isnan_half_vec2_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w8, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[1]
+; CHECK-NEXT:    fmov s2, w8
+; CHECK-NEXT:    movi v0.2s, #127, msl #8
+; CHECK-NEXT:    mov v2.s[1], w9
+; CHECK-NEXT:    movi v1.2s, #124, lsl #8
+; CHECK-NEXT:    and v0.8b, v2.8b, v0.8b
+; CHECK-NEXT:    sub v0.2s, v1.2s, v0.2s
+; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-NEXT:    cmtst v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f16(<2 x half> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_bfloat_vec2_strictfp(<2 x bfloat> %x) strictfp {
+; CHECK-LABEL: isnan_bfloat_vec2_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w8, v0.h[0]
+; CHECK-NEXT:    umov w9, v0.h[1]
+; CHECK-NEXT:    fmov s1, w8
+; CHECK-NEXT:    movi v0.2s, #127, msl #8
+; CHECK-NEXT:    mov w10, #32640
+; CHECK-NEXT:    mov v1.s[1], w9
+; CHECK-NEXT:    dup v2.2s, w10
+; CHECK-NEXT:    and v0.8b, v1.8b, v0.8b
+; CHECK-NEXT:    sub v0.2s, v2.2s, v0.2s
+; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-NEXT:    cmtst v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2bf16(<2 x bfloat> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_float_vec2_strictfp(<2 x float> %x) strictfp {
+; CHECK-LABEL: isnan_float_vec2_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #2139095040
+; CHECK-NEXT:    bic v0.2s, #128, lsl #24
+; CHECK-NEXT:    dup v1.2s, w8
+; CHECK-NEXT:    sub v0.2s, v1.2s, v0.2s
+; CHECK-NEXT:    cmtst v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f32(<2 x float> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_double_vec2_strictfp(<2 x double> %x) strictfp {
+; CHECK-LABEL: isnan_double_vec2_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov x8, #9223372036854775807
+; CHECK-NEXT:    mov x9, #9218868437227405312
+; CHECK-NEXT:    dup v1.2d, x8
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    dup v1.2d, x9
+; CHECK-NEXT:    sub v0.2d, v1.2d, v0.2d
+; CHECK-NEXT:    cmtst v0.2d, v0.2d, v0.2d
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f64(<2 x double> %x)
+  ret <2 x i1> %0
+}
+
+define <2 x i1> @isnan_ldouble_vec2_strictfp(<2 x fp128> %x) strictfp {
+; CHECK-LABEL: isnan_ldouble_vec2_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    stp q0, q1, [sp, #-32]!
+; CHECK-NEXT:    ldp x8, x9, [sp]
+; CHECK-NEXT:    ldp x10, x11, [sp, #16]
+; CHECK-NEXT:    mov x12, #9223090561878065152
+; CHECK-NEXT:    and x9, x9, #0x7fffffffffffffff
+; CHECK-NEXT:    negs x8, x8
+; CHECK-NEXT:    sbcs x9, x12, x9
+; CHECK-NEXT:    and x11, x11, #0x7fffffffffffffff
+; CHECK-NEXT:    negs x10, x10
+; CHECK-NEXT:    orr x8, x8, x9
+; CHECK-NEXT:    sbcs x9, x12, x11
+; CHECK-NEXT:    orr x9, x10, x9
+; CHECK-NEXT:    cmp x9, #0 // =0
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    dup v0.2d, x8
+; CHECK-NEXT:    sbfx x8, x9, #0, #1
+; CHECK-NEXT:    dup v1.2d, x8
+; CHECK-NEXT:    zip1 v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <2 x i1> @llvm.isnan.v2f128(<2 x fp128> %x)
+  ret <2 x i1> %0
+}
+
+
+define <4 x i1> @isnan_half_vec4(<4 x half> %x) {
+; CHECK-LABEL: isnan_half_vec4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvtl v0.4s, v0.4h
+; CHECK-NEXT:    fcmge v1.4s, v0.4s, #0.0
+; CHECK-NEXT:    fcmlt v0.4s, v0.4s, #0.0
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    mvn v0.8b, v0.8b
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f16(<4 x half> %x)
+  ret <4 x i1> %0
+}
+
+define <4 x i1> @isnan_float_vec4(<4 x float> %x) {
+; CHECK-LABEL: isnan_float_vec4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmge v1.4s, v0.4s, #0.0
+; CHECK-NEXT:    fcmlt v0.4s, v0.4s, #0.0
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f32(<4 x float> %x)
+  ret <4 x i1> %0
+}
+
+define <4 x i1> @isnan_double_vec4(<4 x double> %x) {
+; CHECK-LABEL: isnan_double_vec4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmge v2.2d, v0.2d, #0.0
+; CHECK-NEXT:    fcmlt v0.2d, v0.2d, #0.0
+; CHECK-NEXT:    fcmge v3.2d, v1.2d, #0.0
+; CHECK-NEXT:    fcmlt v1.2d, v1.2d, #0.0
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    orr v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    mvn v1.16b, v1.16b
+; CHECK-NEXT:    xtn2 v0.4s, v1.2d
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f64(<4 x double> %x)
+  ret <4 x i1> %0
+}
+
+
+define <4 x i1> @isnan_half_vec4_strictfp(<4 x half> %x) strictfp {
+; CHECK-LABEL: isnan_half_vec4_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v1.4h, #124, lsl #8
+; CHECK-NEXT:    bic v0.4h, #128, lsl #8
+; CHECK-NEXT:    sub v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    cmtst v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f16(<4 x half> %x)
+  ret <4 x i1> %0
+}
+
+define <4 x i1> @isnan_bfloat_vec4_strictfp(<4 x bfloat> %x) strictfp {
+; CHECK-LABEL: isnan_bfloat_vec4_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #32640
+; CHECK-NEXT:    bic v0.4h, #128, lsl #8
+; CHECK-NEXT:    dup v1.4h, w8
+; CHECK-NEXT:    sub v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    cmtst v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4bf16(<4 x bfloat> %x)
+  ret <4 x i1> %0
+}
+
+define <4 x i1> @isnan_float_vec4_strictfp(<4 x float> %x) strictfp {
+; CHECK-LABEL: isnan_float_vec4_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #2139095040
+; CHECK-NEXT:    bic v0.4s, #128, lsl #24
+; CHECK-NEXT:    dup v1.4s, w8
+; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    cmtst v0.4s, v0.4s, v0.4s
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f32(<4 x float> %x)
+  ret <4 x i1> %0
+}
+
+define <4 x i1> @isnan_double_vec4_strictfp(<4 x double> %x) strictfp {
+; CHECK-LABEL: isnan_double_vec4_strictfp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov x8, #9223372036854775807
+; CHECK-NEXT:    dup v2.2d, x8
+; CHECK-NEXT:    mov x8, #9218868437227405312
+; CHECK-NEXT:    dup v3.2d, x8
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    sub v0.2d, v3.2d, v0.2d
+; CHECK-NEXT:    sub v1.2d, v3.2d, v1.2d
+; CHECK-NEXT:    cmtst v0.2d, v0.2d, v0.2d
+; CHECK-NEXT:    xtn v0.2s, v0.2d
+; CHECK-NEXT:    cmtst v1.2d, v1.2d, v1.2d
+; CHECK-NEXT:    xtn2 v0.4s, v1.2d
+; CHECK-NEXT:    xtn v0.4h, v0.4s
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <4 x i1> @llvm.isnan.v4f64(<4 x double> %x)
+  ret <4 x i1> %0
+}
+
+
+declare i1 @llvm.isnan.f16(half)
+declare i1 @llvm.isnan.bf16(bfloat)
+declare i1 @llvm.isnan.f32(float)
+declare i1 @llvm.isnan.f64(double)
+declare i1 @llvm.isnan.f128(fp128)
+declare <1 x i1> @llvm.isnan.v1f16(<1 x half>)
+declare <1 x i1> @llvm.isnan.v1bf16(<1 x bfloat>)
+declare <1 x i1> @llvm.isnan.v1f32(<1 x float>)
+declare <1 x i1> @llvm.isnan.v1f64(<1 x double>)
+declare <1 x i1> @llvm.isnan.v1f128(<1 x fp128>)
+declare <2 x i1> @llvm.isnan.v2f16(<2 x half>)
+declare <2 x i1> @llvm.isnan.v2bf16(<2 x bfloat>)
+declare <2 x i1> @llvm.isnan.v2f32(<2 x float>)
+declare <2 x i1> @llvm.isnan.v2f64(<2 x double>)
+declare <2 x i1> @llvm.isnan.v2f128(<2 x fp128>)
+declare <4 x i1> @llvm.isnan.v4f16(<4 x half>)
+declare <4 x i1> @llvm.isnan.v4bf16(<4 x bfloat>)
+declare <4 x i1> @llvm.isnan.v4f32(<4 x float>)
+declare <4 x i1> @llvm.isnan.v4f64(<4 x double>)
+declare <4 x i1> @llvm.isnan.v4f128(<4 x fp128>)
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -712,6 +712,7 @@
     setOperationAction(ISD::LLROUND, MVT::f80, Expand);
     setOperationAction(ISD::LRINT, MVT::f80, Custom);
     setOperationAction(ISD::LLRINT, MVT::f80, Custom);
+    setOperationAction(ISD::ISNAN, MVT::f80, Custom);
 
     // Handle constrained floating-point operations of scalar.
     setOperationAction(ISD::STRICT_FADD     , MVT::f80, Legal);
@@ -22011,6 +22012,45 @@
   return Res;
 }
 
+static SDValue lowerISNAN(SDValue Op, SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  SDValue Arg = Op.getOperand(0);
+  MVT ArgVT = Arg.getSimpleValueType();
+  MVT ResultVT = Op.getSimpleValueType();
+
+  // Classify the argument with the x87 FXAM instruction, which reports the
+  // operand class in the C3/C2/C0 condition-code bits of the FP status word.
+  unsigned Opc;
+  switch (ArgVT.SimpleTy) {
+  default:
+    llvm_unreachable("Unexpected type!");
+  case MVT::f32:
+    Opc = X86::XAM_Fp32;
+    break;
+  case MVT::f64:
+    Opc = X86::XAM_Fp64;
+    break;
+  case MVT::f80:
+    Opc = X86::XAM_Fp80;
+    break;
+  }
+  SDValue Test(DAG.getMachineNode(Opc, DL, MVT::Glue, Arg), 0);
+
+  // Store the x87 FP status word (FPSW) into AX with FNSTSW.
+  SDValue FNSTSW = SDValue(
+      DAG.getMachineNode(X86::FNSTSW16r, DL, MVT::i16, Test), 0);
+
+  // Extract AH, the upper 8 bits of AX, which hold C0, C2 and C3.
+  SDValue Extract =
+      DAG.getTargetExtractSubreg(X86::sub_8bit_hi, DL, MVT::i8, FNSTSW);
+
+  // Keep only C3 (0x40), C2 (0x04), C0 (0x01); FXAM encodes NaN as C0 alone.
+  Extract = DAG.getNode(ISD::AND, DL, MVT::i8, Extract,
+                        DAG.getConstant(0x45, DL, MVT::i8));
+
+  return DAG.getSetCC(DL, ResultVT, Extract, DAG.getConstant(1, DL, MVT::i8),
+                      ISD::CondCode::SETEQ);
+}
+
 /// Helper for creating a X86ISD::SETCC node.
 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
                         SelectionDAG &DAG) {
@@ -30341,6 +30381,7 @@
   case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
   case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
   case ISD::FGETSIGN:           return LowerFGETSIGN(Op, DAG);
+  case ISD::ISNAN:              return lowerISNAN(Op, DAG);
   case ISD::LRINT:
   case ISD::LLRINT:             return LowerLRINT_LLRINT(Op, DAG);
   case ISD::SETCC:
Index: llvm/lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -760,6 +760,7 @@
 
     // These operations default to expand.
     setOperationAction(ISD::FGETSIGN, VT, Expand);
+    setOperationAction(ISD::ISNAN, VT, Expand);
     setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
     setOperationAction(ISD::FMINNUM, VT, Expand);
     setOperationAction(ISD::FMAXNUM, VT, Expand);
Index: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6953,6 +6953,37 @@
   return SDValue();
 }
 
+SDValue TargetLowering::expandISNAN(EVT ResultVT, SDValue Op, SDNodeFlags Flags,
+                                    const SDLoc &DL, SelectionDAG &DAG) const {
+  EVT OperandVT = Op.getValueType();
+  assert(OperandVT.isFloatingPoint());
+
+  // If floating point exceptions are ignored, expand to unordered comparison.
+  if (Flags.hasNoFPExcept())
+    return DAG.getSetCC(DL, ResultVT, Op, DAG.getConstantFP(0.0, DL, OperandVT),
+                        ISD::SETUO);
+
+  // In general case use integer operations to avoid traps if argument is SNaN.
+
+  // NaN has all exponent bits set and a non-zero significand. Therefore:
+  // isnan(V) == ((exp mask - abs(V)) < 0), using a signed comparison.
+  unsigned BitSize = OperandVT.getScalarSizeInBits();
+  EVT IntVT = OperandVT.changeTypeToInteger();
+  SDValue ArgV = DAG.getBitcast(IntVT, Op);
+  APInt AndMask = APInt::getSignedMaxValue(BitSize);
+  SDValue AndMaskV = DAG.getConstant(AndMask, DL, IntVT);
+  SDValue AbsV = DAG.getNode(ISD::AND, DL, IntVT, ArgV, AndMaskV);
+  EVT ScalarFloatVT = OperandVT.getScalarType();
+  const Type *FloatTy = ScalarFloatVT.getTypeForEVT(*DAG.getContext());
+  const llvm::fltSemantics &Semantics = FloatTy->getFltSemantics();
+  APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
+  SDValue ExpMaskV = DAG.getConstant(ExpMask, DL, IntVT);
+  SDValue Sub = DAG.getNode(ISD::SUB, DL, IntVT, ExpMaskV, AbsV);
+  // NaN <=> sign bit of Sub is set, i.e. Sub < 0 signed. SETNE would wrongly
+  // classify every finite non-infinity value (Sub > 0) as NaN as well.
+  return DAG.getSetCC(DL, ResultVT, Sub, DAG.getConstant(0, DL, IntVT),
+                      ISD::SETLT);
+}
+
 bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result,
                                  SelectionDAG &DAG) const {
   SDLoc dl(Node);
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -267,6 +267,7 @@
   case ISD::FCOPYSIGN:                  return "fcopysign";
   case ISD::FGETSIGN:                   return "fgetsign";
   case ISD::FCANONICALIZE:              return "fcanonicalize";
+  case ISD::ISNAN:                      return "isnan";
   case ISD::FPOW:                       return "fpow";
   case ISD::STRICT_FPOW:                return "strict_fpow";
   case ISD::SMIN:                       return "smin";
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6402,6 +6402,30 @@
     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
     return;
   }
+  case Intrinsic::isnan: {
+    const DataLayout DLayout = DAG.getDataLayout();
+    EVT DestVT = TLI.getValueType(DLayout, I.getType());
+    EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
+    MachineFunction &MF = DAG.getMachineFunction();
+    const Function &F = MF.getFunction();
+    SDValue Op = getValue(I.getArgOperand(0));
+    SDNodeFlags Flags;
+    Flags.setNoFPExcept(
+        !F.getAttributes().hasFnAttribute(llvm::Attribute::StrictFP));
+
+    // If ISD::ISNAN will need expansion, expand it here rather than during
+    // legalization, because the expansion may create values of types that
+    // are illegal for the target; early expansion lets them be legalized.
+    if (!TLI.isOperationLegalOrCustom(ISD::ISNAN, ArgVT)) {
+      SDValue Result = TLI.expandISNAN(DestVT, Op, Flags, sdl, DAG);
+      setValue(&I, Result);
+      return;
+    }
+
+    SDValue V = DAG.getNode(ISD::ISNAN, sdl, DestVT, Op, Flags);
+    setValue(&I, V);
+    return;
+  }
   case Intrinsic::readcyclecounter: {
     SDValue Op = getRoot();
     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -64,6 +64,7 @@
   case ISD::SETCC:             R = ScalarizeVecRes_SETCC(N); break;
   case ISD::UNDEF:             R = ScalarizeVecRes_UNDEF(N); break;
   case ISD::VECTOR_SHUFFLE:    R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
+  case ISD::ISNAN:             R = ScalarizeVecRes_ISNAN(N); break;
   case ISD::ANY_EXTEND_VECTOR_INREG:
   case ISD::SIGN_EXTEND_VECTOR_INREG:
   case ISD::ZERO_EXTEND_VECTOR_INREG:
@@ -582,6 +583,28 @@
   return DAG.getNode(ExtendCode, DL, NVT, Res);
 }
 
+SDValue DAGTypeLegalizer::ScalarizeVecRes_ISNAN(SDNode *N) {
+  SDLoc DL(N);
+  SDValue Arg = N->getOperand(0);
+  EVT ArgVT = Arg.getValueType();
+  EVT ResultVT = N->getValueType(0).getVectorElementType();
+
+  // Handle case where result is scalarized but operand is not.
+  if (getTypeAction(ArgVT) == TargetLowering::TypeScalarizeVector) {
+    Arg = GetScalarizedVector(Arg);
+  } else {
+    EVT VT = ArgVT.getVectorElementType();
+    Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Arg,
+                      DAG.getVectorIdxConstant(0, DL));
+  }
+
+  SDValue Res = DAG.getNode(ISD::ISNAN, DL, MVT::i1, Arg, N->getFlags());
+  // Vectors may have a different boolean contents to scalars.  Promote the
+  // value appropriately.
+  ISD::NodeType ExtendCode =
+      TargetLowering::getExtendForContent(TLI.getBooleanContents(ArgVT));
+  return DAG.getNode(ExtendCode, DL, ResultVT, Res);
+}
 
 //===----------------------------------------------------------------------===//
 //  Operand Vector Scalarization <1 x ty> -> ty.
@@ -924,6 +947,7 @@
   case ISD::INSERT_SUBVECTOR:  SplitVecRes_INSERT_SUBVECTOR(N, Lo, Hi); break;
   case ISD::FPOWI:             SplitVecRes_FPOWI(N, Lo, Hi); break;
   case ISD::FCOPYSIGN:         SplitVecRes_FCOPYSIGN(N, Lo, Hi); break;
+  case ISD::ISNAN:             SplitVecRes_ISNAN(N, Lo, Hi); break;
   case ISD::INSERT_VECTOR_ELT: SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
   case ISD::SPLAT_VECTOR:
   case ISD::SCALAR_TO_VECTOR:
@@ -1360,6 +1384,17 @@
   Hi = DAG.getNode(ISD::FCOPYSIGN, DL, LHSHi.getValueType(), LHSHi, RHSHi);
 }
 
+void DAGTypeLegalizer::SplitVecRes_ISNAN(SDNode *N, SDValue &Lo, SDValue &Hi) {
+  SDLoc DL(N);
+  SDValue ArgLo, ArgHi;
+  GetSplitVector(N->getOperand(0), ArgLo, ArgHi);
+  EVT LoVT, HiVT;
+  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+
+  Lo = DAG.getNode(ISD::ISNAN, DL, LoVT, ArgLo, N->getFlags());
+  Hi = DAG.getNode(ISD::ISNAN, DL, HiVT, ArgHi, N->getFlags());
+}
+
 void DAGTypeLegalizer::SplitVecRes_InregOp(SDNode *N, SDValue &Lo,
                                            SDValue &Hi) {
   SDValue LHSLo, LHSHi;
@@ -4547,6 +4582,7 @@
   case ISD::STRICT_FSETCCS:     Res = WidenVecOp_STRICT_FSETCC(N); break;
   case ISD::VSELECT:            Res = WidenVecOp_VSELECT(N); break;
   case ISD::FCOPYSIGN:          Res = WidenVecOp_FCOPYSIGN(N); break;
+  case ISD::ISNAN:              Res = WidenVecOp_ISNAN(N); break;
 
   case ISD::ANY_EXTEND:
   case ISD::SIGN_EXTEND:
@@ -4683,6 +4719,33 @@
   return DAG.UnrollVectorOp(N);
 }
 
+SDValue DAGTypeLegalizer::WidenVecOp_ISNAN(SDNode *N) {
+  SDLoc DL(N);
+  EVT ResultVT = N->getValueType(0);
+  SDValue WideArg = GetWidenedVector(N->getOperand(0));
+
+  // Process this node similarly to SETCC.
+  EVT WideResultVT = getSetCCResultType(WideArg.getValueType());
+  if (ResultVT.getScalarType() == MVT::i1)
+    WideResultVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
+                                    WideResultVT.getVectorNumElements());
+
+  SDValue WideNode =
+      DAG.getNode(ISD::ISNAN, DL, WideResultVT, WideArg, N->getFlags());
+
+  // Extract the needed results from the result vector.
+  EVT ResVT =
+      EVT::getVectorVT(*DAG.getContext(), WideResultVT.getVectorElementType(),
+                       ResultVT.getVectorNumElements());
+  SDValue CC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResVT, WideNode,
+                           DAG.getVectorIdxConstant(0, DL));
+
+  EVT OpVT = N->getOperand(0).getValueType();
+  ISD::NodeType ExtendCode =
+      TargetLowering::getExtendForContent(TLI.getBooleanContents(OpVT));
+  return DAG.getNode(ExtendCode, DL, ResultVT, CC);
+}
+
 SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
   // Since the result is legal and the input is illegal.
   EVT VT = N->getValueType(0);
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -352,6 +352,7 @@
   SDValue PromoteIntRes_MULFIX(SDNode *N);
   SDValue PromoteIntRes_DIVFIX(SDNode *N);
   SDValue PromoteIntRes_FLT_ROUNDS(SDNode *N);
+  SDValue PromoteIntRes_ISNAN(SDNode *N);
   SDValue PromoteIntRes_VECREDUCE(SDNode *N);
   SDValue PromoteIntRes_ABS(SDNode *N);
   SDValue PromoteIntRes_Rotate(SDNode *N);
@@ -773,6 +774,7 @@
   SDValue ScalarizeVecRes_UNDEF(SDNode *N);
   SDValue ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N);
   SDValue ScalarizeVecRes_FP_TO_XINT_SAT(SDNode *N);
+  SDValue ScalarizeVecRes_ISNAN(SDNode *N);
 
   SDValue ScalarizeVecRes_FIX(SDNode *N);
 
@@ -833,6 +835,7 @@
   void SplitVecRes_INSERT_SUBVECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_FPOWI(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_FCOPYSIGN(SDNode *N, SDValue &Lo, SDValue &Hi);
+  void SplitVecRes_ISNAN(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_MLOAD(MaskedLoadSDNode *MLD, SDValue &Lo, SDValue &Hi);
@@ -942,6 +945,7 @@
   SDValue WidenVecOp_Convert(SDNode *N);
   SDValue WidenVecOp_FP_TO_XINT_SAT(SDNode *N);
   SDValue WidenVecOp_FCOPYSIGN(SDNode *N);
+  SDValue WidenVecOp_ISNAN(SDNode *N);
   SDValue WidenVecOp_VECREDUCE(SDNode *N);
   SDValue WidenVecOp_VECREDUCE_SEQ(SDNode *N);
 
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -139,6 +139,8 @@
 
   case ISD::FLT_ROUNDS_: Res = PromoteIntRes_FLT_ROUNDS(N); break;
 
+  case ISD::ISNAN:       Res = PromoteIntRes_ISNAN(N); break;
+
   case ISD::AND:
   case ISD::OR:
   case ISD::XOR:
@@ -656,6 +658,14 @@
   return Res;
 }
 
+SDValue DAGTypeLegalizer::PromoteIntRes_ISNAN(SDNode *N) {
+  SDLoc DL(N);
+  EVT ResultVT = N->getValueType(0);
+  EVT NewResultVT = TLI.getTypeToTransformTo(*DAG.getContext(), ResultVT);
+  return DAG.getNode(N->getOpcode(), DL, NewResultVT, N->getOperand(0),
+                     N->getFlags());
+}
+
 SDValue DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) {
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
   SDLoc dl(N);
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1184,6 +1184,10 @@
     Action = TLI.getOperationAction(
         Node->getOpcode(), Node->getOperand(1).getValueType());
     break;
+  case ISD::ISNAN:
+    Action = TLI.getOperationAction(Node->getOpcode(),
+                                    Node->getOperand(0).getValueType());
+    break;
   default:
     if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
       Action = TargetLowering::Legal;
@@ -3107,6 +3111,12 @@
   case ISD::FCOPYSIGN:
     Results.push_back(ExpandFCOPYSIGN(Node));
     break;
+  case ISD::ISNAN:
+    if (SDValue Expanded =
+            TLI.expandISNAN(Node->getValueType(0), Node->getOperand(0),
+                            Node->getFlags(), SDLoc(Node), DAG))
+      Results.push_back(Expanded);
+    break;
   case ISD::FNEG:
     Results.push_back(ExpandFNEG(Node));
     break;
Index: llvm/lib/Analysis/ConstantFolding.cpp
===================================================================
--- llvm/lib/Analysis/ConstantFolding.cpp
+++ llvm/lib/Analysis/ConstantFolding.cpp
@@ -1579,9 +1579,10 @@
     return !Call->isStrictFP();
 
   // Sign operations are actually bitwise operations, they do not raise
-  // exceptions even for SNANs.
+  // exceptions even for SNANs. The same applies to classification functions.
   case Intrinsic::fabs:
   case Intrinsic::copysign:
+  case Intrinsic::isnan:
   // Non-constrained variants of rounding operations means default FP
   // environment, they can be folded in any case.
   case Intrinsic::ceil:
@@ -1945,6 +1946,9 @@
       return ConstantInt::get(Ty, Int);
     }
 
+    if (IntrinsicID == Intrinsic::isnan)
+      return ConstantInt::get(Ty, U.isNaN());
+
     if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
       return nullptr;
 
Index: llvm/include/llvm/IR/Intrinsics.td
===================================================================
--- llvm/include/llvm/IR/Intrinsics.td
+++ llvm/include/llvm/IR/Intrinsics.td
@@ -715,6 +715,14 @@
   def int_set_rounding  : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;
 }
 
+//===--------------- Floating Point Test Intrinsics -----------------------===//
+//
+
+def int_isnan
+    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+                            [llvm_anyfloat_ty],
+                            [IntrNoMem, IntrWillReturn]>;
+
 //===--------------- Constrained Floating Point Intrinsics ----------------===//
 //
 
Index: llvm/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetLowering.h
+++ llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4406,6 +4406,10 @@
   /// \returns The expansion result
   SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;
 
+  /// Expand isnan depending on function attributes.
+  SDValue expandISNAN(EVT ResultVT, SDValue Op, SDNodeFlags Flags,
+                      const SDLoc &DL, SelectionDAG &DAG) const;
+
   /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
   /// vector nodes can only succeed if all operations are legal/custom.
   /// \param N Node to expand
Index: llvm/include/llvm/CodeGen/ISDOpcodes.h
===================================================================
--- llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -476,6 +476,10 @@
   /// Returns platform specific canonical encoding of a floating point number.
   FCANONICALIZE,
 
+  /// Performs a check of a floating-point number property defined by IEEE-754.
+  /// The only operand is the floating-point value to check. Returns a boolean.
+  ISNAN,
+
   /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
   /// with the specified, possibly variable, elements. The types of the
   /// operands must match the vector element type, except that integer types
Index: llvm/docs/LangRef.rst
===================================================================
--- llvm/docs/LangRef.rst
+++ llvm/docs/LangRef.rst
@@ -20927,6 +20927,52 @@
 modes.
 
 
+Floating Point Test Intrinsics
+------------------------------
+
+These intrinsics test properties of floating-point values.
+
+
+'``llvm.isnan``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare i1 @llvm.isnan(<fptype> <op>)
+      declare <N x i1> @llvm.isnan(<vector-fptype> <op>)
+
+Overview:
+"""""""""
+
+The '``llvm.isnan``' intrinsic returns a boolean value or vector of boolean
+values indicating whether the operand is NaN.
+
+If the operand is a floating-point scalar, then the result type is a
+boolean (:ref:`i1 <t_integer>`).
+
+If the operand is a floating-point vector, then the result type is a
+vector of boolean values with the same number of elements as the operand.
+
+Arguments:
+""""""""""
+
+The argument to the '``llvm.isnan``' intrinsic must be
+:ref:`floating-point <t_floating>` or :ref:`vector <t_vector>`
+of floating-point values.
+
+
+Semantics:
+""""""""""
+
+The function tests whether ``op`` is NaN. If ``op`` is a vector, then the
+check is made element by element. Each test yields an :ref:`i1 <t_integer>`
+result, which is ``true`` if the value is NaN. The function never raises
+floating point exceptions.
+
+
 General Intrinsics
 ------------------
 
Index: clang/test/CodeGen/strictfp_builtins.c
===================================================================
--- clang/test/CodeGen/strictfp_builtins.c
+++ clang/test/CodeGen/strictfp_builtins.c
@@ -17,7 +17,7 @@
 // CHECK-NEXT:    store i32 [[X:%.*]], i32* [[X_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[STR_ADDR]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[X_ADDR]], align 4
-// CHECK-NEXT:    [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) [[ATTR4:#.*]]
+// CHECK-NEXT:    [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) #[[ATTR5:[0-9]+]]
 // CHECK-NEXT:    ret void
 //
 void p(char *str, int x) {
@@ -31,21 +31,21 @@
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
-// CHECK-NEXT:    [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]]
 // CHECK-NEXT:    br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
 // CHECK:       fpclassify_end:
 // CHECK-NEXT:    [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @.str.1, i64 0, i64 0), i32 [[FPCLASSIFY_RESULT]]) [[ATTR4]]
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @.str.1, i64 0, i64 0), i32 [[FPCLASSIFY_RESULT]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 // CHECK:       fpclassify_not_zero:
-// CHECK-NEXT:    [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]]
 // CHECK-NEXT:    br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
 // CHECK:       fpclassify_not_nan:
-// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5:#.*]]
-// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]]
 // CHECK-NEXT:    br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
 // CHECK:       fpclassify_not_inf:
-// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
 // CHECK-NEXT:    br label [[FPCLASSIFY_END]]
 //
@@ -57,14 +57,14 @@
 
 // CHECK-LABEL: @test_fp16_isinf(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca half, align 2
-// CHECK-NEXT:    store half [[H:%.*]], half* [[LD_ADDR]], align 2
-// CHECK-NEXT:    [[TMP0:%.*]] = load half, half* [[LD_ADDR]], align 2
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast half [[TMP0]] to i16
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i16 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[SHL1]], -2048
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:2]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[H_ADDR:%.*]] = alloca half, align 2
+// CHECK-NEXT:    store half [[H:%.*]], half* [[H_ADDR]], align 2
+// CHECK-NEXT:    [[TMP0:%.*]] = load half, half* [[H_ADDR]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[TMP0]] to i16
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i16 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i16 [[TMP2]], -2048
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.2, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_fp16_isinf(__fp16 h) {
@@ -75,14 +75,14 @@
 
 // CHECK-LABEL: @test_float_isinf(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca float, align 4
-// CHECK-NEXT:    store float [[F:%.*]], float* [[LD_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[LD_ADDR]], align 4
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast float [[TMP0]] to i32
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i32 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[SHL1]], -16777216
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[F_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    store float [[F:%.*]], float* [[F_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[F_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[TMP0]] to i32
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i32 [[TMP2]], -16777216
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.3, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_float_isinf(float f) {
@@ -93,14 +93,14 @@
 
 // CHECK-LABEL: @test_double_isinf(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca double, align 8
-// CHECK-NEXT:    store double [[D:%.*]], double* [[LD_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[LD_ADDR]], align 8
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast double [[TMP0]] to i64
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i64 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[SHL1]], -9007199254740992
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[TMP0]] to i64
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[TMP2]], -9007199254740992
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.4, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_double_isinf(double d) {
@@ -111,14 +111,14 @@
 
 // CHECK-LABEL: @test_fp16_isfinite(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca half, align 2
-// CHECK-NEXT:    store half [[H:%.*]], half* [[LD_ADDR]], align 2
-// CHECK-NEXT:    [[TMP0:%.*]] = load half, half* [[LD_ADDR]], align 2
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast half [[TMP0]] to i16
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i16 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp ult i16 [[SHL1]], -2048
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[H_ADDR:%.*]] = alloca half, align 2
+// CHECK-NEXT:    store half [[H:%.*]], half* [[H_ADDR]], align 2
+// CHECK-NEXT:    [[TMP0:%.*]] = load half, half* [[H_ADDR]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[TMP0]] to i16
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i16 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i16 [[TMP2]], -2048
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.5, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_fp16_isfinite(__fp16 h) {
@@ -129,14 +129,14 @@
 
 // CHECK-LABEL: @test_float_isfinite(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca float, align 4
-// CHECK-NEXT:    store float [[F:%.*]], float* [[LD_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[LD_ADDR]], align 4
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast float [[TMP0]] to i32
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i32 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[SHL1]], -16777216
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[F_ADDR:%.*]] = alloca float, align 4
+// CHECK-NEXT:    store float [[F:%.*]], float* [[F_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[F_ADDR]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[TMP0]] to i32
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i32 [[TMP2]], -16777216
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.6, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_float_isfinite(float f) {
@@ -147,14 +147,14 @@
 
 // CHECK-LABEL: @test_double_isfinite(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca double, align 8
-// CHECK-NEXT:    store double [[D:%.*]], double* [[LD_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[LD_ADDR]], align 8
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast double [[TMP0]] to i64
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i64 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[SHL1]], -9007199254740992
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
+// CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[TMP0]] to i64
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 [[TMP2]], -9007199254740992
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.7, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_double_isfinite(double d) {
@@ -168,13 +168,13 @@
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5]]
-// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
 // CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
 // CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
 // CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[ISINF]], i32 [[TMP4]], i32 0
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[TMP5]]) [[ATTR4]]
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.8, i64 0, i64 0), i32 [[TMP5]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_isinf_sign(double d) {
@@ -188,12 +188,9 @@
 // CHECK-NEXT:    [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT:    store half [[H:%.*]], half* [[H_ADDR]], align 2
 // CHECK-NEXT:    [[TMP0:%.*]] = load half, half* [[H_ADDR]], align 2
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast half [[TMP0]] to i16
-// CHECK-NEXT:    [[ABS:%.*]] = and i16 [[BITCAST]], [[#%u,0x7FFF]]
-// CHECK-NEXT:    [[TMP1:%.*]] = sub i16 [[#%u,0x7C00]], [[ABS]]
-// CHECK-NEXT:    [[ISNAN:%.*]] = lshr i16 [[TMP1]], 15
-// CHECK-NEXT:    [[RES:%.*]] = zext i16 [[ISNAN]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.isnan.f16(half [[TMP0]]) #[[ATTR5]]
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.9, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_fp16_isnan(__fp16 h) {
@@ -207,11 +204,9 @@
 // CHECK-NEXT:    [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT:    store float [[F:%.*]], float* [[F_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[F_ADDR]], align 4
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast float [[TMP0]] to i32
-// CHECK-NEXT:    [[ABS:%.*]] = and i32 [[BITCAST]], [[#%u,0x7FFFFFFF]]
-// CHECK-NEXT:    [[TMP1:%.*]] = sub i32 [[#%u,0x7F800000]], [[ABS]]
-// CHECK-NEXT:    [[ISNAN:%.*]] = lshr i32 [[TMP1]], 31
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[ISNAN]]) [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.isnan.f32(float [[TMP0]]) #[[ATTR5]]
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.10, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_float_isnan(float f) {
@@ -225,12 +220,9 @@
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast double [[TMP0]] to i64
-// CHECK-NEXT:    [[ABS:%.*]] = and i64 [[BITCAST]], [[#%u,0x7FFFFFFFFFFFFFFF]]
-// CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[#%u,0x7FF0000000000000]], [[ABS]]
-// CHECK-NEXT:    [[ISNAN:%.*]] = lshr i64 [[TMP1]], 63
-// CHECK-NEXT:    [[RES:%.*]] = trunc i64 [[ISNAN]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.isnan.f64(double [[TMP0]]) #[[ATTR5]]
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.11, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_double_isnan(double d) {
@@ -244,14 +236,14 @@
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], double* [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[D_ADDR]], align 8
-// CHECK-NEXT:    [[ISEQ:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"oeq", metadata !"fpexcept.strict") [[ATTR4]]
-// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) [[ATTR5]]
-// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"ult", metadata !"fpexcept.strict") [[ATTR4]]
-// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") [[ATTR4]]
+// CHECK-NEXT:    [[ISEQ:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
+// CHECK-NEXT:    [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"ult", metadata !"fpexcept.strict") #[[ATTR5]]
+// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]]
 // CHECK-NEXT:    [[AND:%.*]] = and i1 [[ISEQ]], [[ISINF]]
 // CHECK-NEXT:    [[AND1:%.*]] = and i1 [[AND]], [[ISNORMAL]]
 // CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[AND1]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[TMP2]]) [[ATTR4]]
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.12, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR5]]
 // CHECK-NEXT:    ret void
 //
 void test_isnormal(double d) {
Index: clang/test/CodeGen/aarch64-strictfp-builtins.c
===================================================================
--- clang/test/CodeGen/aarch64-strictfp-builtins.c
+++ clang/test/CodeGen/aarch64-strictfp-builtins.c
@@ -1,3 +1,4 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 %s -emit-llvm -ffp-exception-behavior=maytrap -fexperimental-strict-floating-point -o - -triple arm64-none-linux-gnu | FileCheck %s
 
 // Test that the constrained intrinsics are picking up the exception
@@ -15,7 +16,7 @@
 // CHECK-NEXT:    store i32 [[X:%.*]], i32* [[X_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[STR_ADDR]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[X_ADDR]], align 4
-// CHECK-NEXT:    [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]])  [[ATTR4:#.*]]
+// CHECK-NEXT:    [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) #[[ATTR3:[0-9]+]]
 // CHECK-NEXT:    ret void
 //
 void p(char *str, int x) {
@@ -27,13 +28,13 @@
 // CHECK-LABEL: @test_long_double_isinf(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca fp128, align 16
-// CHECK-NEXT:    store fp128 [[D:%.*]], fp128* [[LD_ADDR]], align 16
+// CHECK-NEXT:    store fp128 [[LD:%.*]], fp128* [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load fp128, fp128* [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast fp128 [[TMP0]] to i128
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i128 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i128 [[SHL1]], -10384593717069655257060992658440192
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.[[#STRID:1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast fp128 [[TMP0]] to i128
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i128 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i128 [[TMP2]], -10384593717069655257060992658440192
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.1, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
 //
 void test_long_double_isinf(long double ld) {
@@ -45,13 +46,13 @@
 // CHECK-LABEL: @test_long_double_isfinite(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca fp128, align 16
-// CHECK-NEXT:    store fp128 [[D:%.*]], fp128* [[LD_ADDR]], align 16
+// CHECK-NEXT:    store fp128 [[LD:%.*]], fp128* [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load fp128, fp128* [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast fp128 [[TMP0]] to i128
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i128 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp ult i128 [[SHL1]], -10384593717069655257060992658440192
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast fp128 [[TMP0]] to i128
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i128 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i128 [[TMP2]], -10384593717069655257060992658440192
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.2, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
 //
 void test_long_double_isfinite(long double ld) {
@@ -63,14 +64,11 @@
 // CHECK-LABEL: @test_long_double_isnan(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca fp128, align 16
-// CHECK-NEXT:    store fp128 [[D:%.*]], fp128* [[LD_ADDR]], align 16
+// CHECK-NEXT:    store fp128 [[LD:%.*]], fp128* [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load fp128, fp128* [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast fp128 [[TMP0]] to i128
-// CHECK-NEXT:    [[ABS:%.*]] = and i128 [[BITCAST]], 170141183460469231731687303715884105727
-// CHECK-NEXT:    [[TMP1:%.*]] = sub i128 170135991163610696904058773219554885632, [[ABS]]
-// CHECK-NEXT:    [[ISNAN:%.*]] = lshr i128 [[TMP1]], 127
-// CHECK-NEXT:    [[RES:%.*]] = trunc i128 [[ISNAN]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]])
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.isnan.f128(fp128 [[TMP0]]) #[[ATTR3]]
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.3, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
 //
 void test_long_double_isnan(long double ld) {
Index: clang/test/CodeGen/X86/strictfp_builtins.c
===================================================================
--- clang/test/CodeGen/X86/strictfp_builtins.c
+++ clang/test/CodeGen/X86/strictfp_builtins.c
@@ -17,7 +17,7 @@
 // CHECK-NEXT:    store i32 [[X:%.*]], i32* [[X_ADDR]], align 4
 // CHECK-NEXT:    [[TMP0:%.*]] = load i8*, i8** [[STR_ADDR]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[X_ADDR]], align 4
-// CHECK-NEXT:    [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) [[ATTR4:#.*]]
+// CHECK-NEXT:    [[CALL:%.*]] = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i64 0, i64 0), i8* [[TMP0]], i32 [[TMP1]]) #[[ATTR3:[0-9]+]]
 // CHECK-NEXT:    ret void
 //
 void p(char *str, int x) {
@@ -29,13 +29,13 @@
 // CHECK-LABEL: @test_long_double_isinf(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
-// CHECK-NEXT:    store x86_fp80 [[D:%.*]], x86_fp80* [[LD_ADDR]], align 16
+// CHECK-NEXT:    store x86_fp80 [[LD:%.*]], x86_fp80* [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i80 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp eq i80 [[SHL1]], -18446744073709551616
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.[[#STRID:1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i80 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i80 [[TMP2]], -18446744073709551616
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.1, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
 //
 void test_long_double_isinf(long double ld) {
@@ -47,13 +47,13 @@
 // CHECK-LABEL: @test_long_double_isfinite(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
-// CHECK-NEXT:    store x86_fp80 [[D:%.*]], x86_fp80* [[LD_ADDR]], align 16
+// CHECK-NEXT:    store x86_fp80 [[LD:%.*]], x86_fp80* [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
-// CHECK-NEXT:    [[SHL1:%.*]] = shl i80 [[BITCAST]], 1
-// CHECK-NEXT:    [[CMP:%.*]] = icmp ult i80 [[SHL1]], -18446744073709551616
-// CHECK-NEXT:    [[RES:%.*]] = zext i1 [[CMP]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
+// CHECK-NEXT:    [[TMP2:%.*]] = shl i80 [[TMP1]], 1
+// CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i80 [[TMP2]], -18446744073709551616
+// CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.2, i64 0, i64 0), i32 [[TMP4]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
 //
 void test_long_double_isfinite(long double ld) {
@@ -65,14 +65,11 @@
 // CHECK-LABEL: @test_long_double_isnan(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
-// CHECK-NEXT:    store x86_fp80 [[D:%.*]], x86_fp80* [[LD_ADDR]], align 16
+// CHECK-NEXT:    store x86_fp80 [[LD:%.*]], x86_fp80* [[LD_ADDR]], align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = load x86_fp80, x86_fp80* [[LD_ADDR]], align 16
-// CHECK-NEXT:    [[BITCAST:%.*]] = bitcast x86_fp80 [[TMP0]] to i80
-// CHECK-NEXT:    [[ABS:%.*]] = and i80 [[BITCAST]], 604462909807314587353087
-// CHECK-NEXT:    [[TMP1:%.*]] = sub i80 604453686435277732577280, [[ABS]]
-// CHECK-NEXT:    [[ISNAN:%.*]] = lshr i80 [[TMP1]], 79
-// CHECK-NEXT:    [[RES:%.*]] = trunc i80 [[ISNAN]] to i32
-// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.[[#STRID:STRID+1]], i64 0, i64 0), i32 [[RES]]) [[ATTR4]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.isnan.f80(x86_fp80 [[TMP0]]) #[[ATTR3]]
+// CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
+// CHECK-NEXT:    call void @p(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.3, i64 0, i64 0), i32 [[TMP2]]) #[[ATTR3]]
 // CHECK-NEXT:    ret void
 //
 void test_long_double_isnan(long double ld) {
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -3020,37 +3020,17 @@
     // ZExt bool to int type.
     return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
   }
+
   case Builtin::BI__builtin_isnan: {
     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
     Value *V = EmitScalarExpr(E->getArg(0));
-    llvm::Type *Ty = V->getType();
-    const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
-    if (!Builder.getIsFPConstrained() ||
-        Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
-        !Ty->isIEEE()) {
-      V = Builder.CreateFCmpUNO(V, V, "cmp");
-      return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
-    }
 
     if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
       return RValue::get(Result);
 
-    // NaN has all exp bits set and a non zero significand. Therefore:
-    // isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0)
-    unsigned bitsize = Ty->getScalarSizeInBits();
-    llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
-    Value *IntV = Builder.CreateBitCast(V, IntTy);
-    APInt AndMask = APInt::getSignedMaxValue(bitsize);
-    Value *AbsV =
-        Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
-    APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
-    Value *Sub =
-        Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
-    // V = sign bit (Sub) <=> V = (Sub < 0)
-    V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
-    if (bitsize > 32)
-      V = Builder.CreateTrunc(V, ConvertType(E->getType()));
-    return RValue::get(V);
+    Function *F = CGM.getIntrinsic(Intrinsic::isnan, V->getType());
+    Value *Call = Builder.CreateCall(F, V);
+    return RValue::get(Builder.CreateZExt(Call, ConvertType(E->getType())));
   }
 
   case Builtin::BI__builtin_matrix_transpose: {
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to