https://github.com/SpencerAbson updated https://github.com/llvm/llvm-project/pull/118549
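For context, here is a minimal usage sketch of the FP8 FMOPA intrinsics this PR introduces. It is not part of the patch; the wrapper function and the tile choice are illustrative, with the intrinsic names, immediate ranges, and attributes taken from the patch itself:

#include <arm_sme.h>

// Accumulate an FP8 outer product into ZA, widening to f16 / f32.
// svmopa_za16_mf8_m_fpm takes a ZA.H tile index in [0, 1] (+sme-f8f16);
// svmopa_za32_mf8_m_fpm takes a ZA.S tile index in [0, 3] (+sme-f8f32).
// The fpm_t operand is written to FPMR before the operation executes.
void fp8_outer_product(svbool_t pn, svbool_t pm, svmfloat8_t zn,
                       svmfloat8_t zm, fpm_t fpm)
    __arm_streaming __arm_inout("za") {
  svmopa_za16_mf8_m_fpm(0, pn, pm, zn, zm, fpm);
  svmopa_za32_mf8_m_fpm(0, pn, pm, zn, zm, fpm);
}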
>From 298e1372cb8e6b2f7dddd707f42448521d3014bc Mon Sep 17 00:00:00 2001 From: Spencer Abson <spencer.ab...@arm.com> Date: Mon, 25 Nov 2024 21:47:20 +0000 Subject: [PATCH 1/3] [AArch64] Implement intrinsics for SME FP8 FMOPA --- clang/include/clang/Basic/arm_sme.td | 10 ++++ clang/lib/CodeGen/CGBuiltin.cpp | 6 ++ .../fp8-intrinsics/acle_sme2_fp8_fmopa.c | 55 +++++++++++++++++++ .../acle_sme2_fp8_imm.c | 18 ++++++ .../acle_sme2_fp8_mopa.c | 13 +++++ llvm/include/llvm/IR/IntrinsicsAArch64.td | 11 ++++ .../lib/Target/AArch64/AArch64SMEInstrInfo.td | 17 +++--- llvm/lib/Target/AArch64/SMEInstrFormats.td | 26 ++++++++- .../AArch64/sme2-fp8-intrinsics-fmopa.ll | 22 ++++++++ 9 files changed, 166 insertions(+), 12 deletions(-) create mode 100644 clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_fmopa.c create mode 100644 clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c create mode 100644 clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_mopa.c create mode 100644 llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-fmopa.ll diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td index 0f689e82bdb742..71b2c7cdd04f93 100644 --- a/clang/include/clang/Basic/arm_sme.td +++ b/clang/include/clang/Basic/arm_sme.td @@ -824,4 +824,14 @@ let SMETargetGuard = "sme-lutv2" in { def SVLUTI4_ZT_X4 : SInst<"svluti4_zt_{d}_x4", "4i2.u", "cUc", MergeNone, "aarch64_sme_luti4_zt_x4", [IsStreaming, IsInZT0], [ImmCheck<0, ImmCheck0_0>]>; } +let SMETargetGuard = "sme-f8f32" in { + def SVMOPA_FP8_ZA32 : Inst<"svmopa_za32[_mf8]_m_fpm", "viPPdd>", "m", MergeNone, "aarch64_sme_fp8_fmopa_za32", + [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<0, ImmCheck0_3>]>; +} + +let SMETargetGuard = "sme-f8f16" in { + def SVMOPA_FP8_ZA16 : Inst<"svmopa_za16[_mf8]_m_fpm", "viPPdd>", "m", MergeNone, "aarch64_sme_fp8_fmopa_za16", + [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<0, ImmCheck0_1>]>; +} + } // let SVETargetGuard = InvalidMode diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 41c632ead6aa3c..c2e983eebebc10 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -10201,6 +10201,8 @@ CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) { case SVETypeFlags::EltTyInt64: return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2); + case SVETypeFlags::EltTyMFloat8: + return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16); case SVETypeFlags::EltTyFloat16: return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8); case SVETypeFlags::EltTyBFloat16: @@ -11255,6 +11257,10 @@ Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, BuiltinID == SME::BI__builtin_sme_svstr_za) return EmitSMELdrStr(TypeFlags, Ops, Builtin->LLVMIntrinsic); + // Emit set FPMR for intrinsics that require it + if (TypeFlags.setsFPMR()) + Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_set_fpmr), + Ops.pop_back_val()); // Handle builtins which require their multi-vector operands to be swapped swapCommutativeSMEOperands(BuiltinID, Ops); diff --git a/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_fmopa.c b/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_fmopa.c new file mode 100644 index 00000000000000..95d6383ab30efe --- /dev/null +++ b/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_fmopa.c @@ -0,0 +1,55 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: aarch64-registered-target + +// RUN: %clang_cc1 
-triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s + +#include <arm_sme.h> + +#ifdef SVE_OVERLOADED_FORMS +#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3) A1##A3 +#else +#define SVE_ACLE_FUNC(A1,A2,A3) A1##A2##A3 +#endif + + +// CHECK-LABEL: define dso_local void @test_svmopa_za16_mf8_m( +// CHECK-SAME: <vscale x 16 x i1> [[PN:%.*]], <vscale x 16 x i1> [[PM:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]]) +// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmopa.za16(i32 1, <vscale x 16 x i1> [[PN]], <vscale x 16 x i1> [[PM]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]]) +// CHECK-NEXT: ret void +// +// CPP-CHECK-LABEL: define dso_local void @_Z22test_svmopa_za16_mf8_mu10__SVBool_tS_u13__SVMfloat8_tS0_m( +// CPP-CHECK-SAME: <vscale x 16 x i1> [[PN:%.*]], <vscale x 16 x i1> [[PM:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0:[0-9]+]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]]) +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmopa.za16(i32 1, <vscale x 16 x i1> [[PN]], <vscale x 16 x i1> [[PM]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]]) +// CPP-CHECK-NEXT: ret void +// +void test_svmopa_za16_mf8_m(svbool_t pn, svbool_t pm, svmfloat8_t zn, + svmfloat8_t zm, fpm_t fpmr) __arm_streaming __arm_inout("za") { + SVE_ACLE_FUNC(svmopa_za16,_mf8,_m_fpm)(1, pn, pm, zn, zm, fpmr); +} + +// CHECK-LABEL: define dso_local void @test_svmopa_za32_mf8_m( +// CHECK-SAME: <vscale x 16 x i1> [[PN:%.*]], <vscale x 16 x i1> [[PM:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]]) +// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmopa.za32(i32 3, <vscale x 16 x i1> [[PN]], <vscale x 16 x i1> [[PM]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]]) +// CHECK-NEXT: ret void +// +// CPP-CHECK-LABEL: define dso_local void @_Z22test_svmopa_za32_mf8_mu10__SVBool_tS_u13__SVMfloat8_tS0_m( +// CPP-CHECK-SAME: <vscale x 16 x i1> [[PN:%.*]], <vscale x 16 x i1> [[PM:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 
noundef [[FPMR:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]]) +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmopa.za32(i32 3, <vscale x 16 x i1> [[PN]], <vscale x 16 x i1> [[PM]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]]) +// CPP-CHECK-NEXT: ret void +// +void test_svmopa_za32_mf8_m(svbool_t pn, svbool_t pm, svmfloat8_t zn, + svmfloat8_t zm, fpm_t fpmr) __arm_streaming __arm_inout("za") { + SVE_ACLE_FUNC(svmopa_za32,_mf8,_m_fpm)(3, pn, pm, zn, zm, fpmr); +} diff --git a/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c new file mode 100644 index 00000000000000..62cad9cfa4c8fd --- /dev/null +++ b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sme2 -target-feature +sme-f8f16 -target-feature +sme-f8f32 -fsyntax-only -verify %s + +// REQUIRES: aarch64-registered-target + +#include <arm_sme.h> + +void test_svmopa(svbool_t pn, svbool_t pm, svmfloat8_t zn, svmfloat8_t zm, + fpm_t fpmr) __arm_streaming __arm_inout("za") { + // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 1]}} + svmopa_za16_mf8_m_fpm(-1, pn, pm, zn, zm, fpmr); + // expected-error@+1 {{argument value 2 is outside the valid range [0, 1]}} + svmopa_za16_mf8_m_fpm(2, pn, pm, zn, zm, fpmr); + + // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 3]}} + svmopa_za32_mf8_m_fpm(-1, pn, pm, zn, zm, fpmr); + // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} + svmopa_za32_mf8_m_fpm(4, pn, pm, zn, zm, fpmr); +} diff --git a/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_mopa.c b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_mopa.c new file mode 100644 index 00000000000000..86426abcd43291 --- /dev/null +++ b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_mopa.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -verify -emit-llvm-only %s + +// REQUIRES: aarch64-registered-target + +#include <arm_sme.h> + +void test_features(svbool_t pn, svbool_t pm, svmfloat8_t zn, svmfloat8_t zm, + fpm_t fpmr) __arm_streaming __arm_inout("za") { + // expected-error@+1 {{'svmopa_za16_mf8_m_fpm' needs target feature sme,sme-f8f16}} + svmopa_za16_mf8_m_fpm(0, pn, pm, zn, zm, fpmr); + // expected-error@+1 {{'svmopa_za32_mf8_m_fpm' needs target feature sme,sme-f8f32}} + svmopa_za32_mf8_m_fpm(0, pn, pm, zn, zm, fpmr); +} diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td index 8ca00fc59a2554..8597f029c116da 100644 --- a/llvm/include/llvm/IR/IntrinsicsAArch64.td +++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td @@ -2983,6 +2983,13 @@ let TargetPrefix = "aarch64" in { LLVMMatchType<0>, llvm_anyvector_ty], [ImmArg<ArgIndex<0>>]>; + class SME_FP8_OuterProduct_Intrinsic + : DefaultAttrsIntrinsic<[], + [llvm_i32_ty, + llvm_nxv16i1_ty, llvm_nxv16i1_ty, + llvm_nxv16i8_ty, llvm_nxv16i8_ty], + [ImmArg<ArgIndex<0>>, IntrInaccessibleMemOnly, IntrHasSideEffects]>; + def int_aarch64_sme_mopa : SME_OuterProduct_Intrinsic; def int_aarch64_sme_mops : SME_OuterProduct_Intrinsic; @@ -2998,6 +3005,10 @@ let TargetPrefix = "aarch64" in { def int_aarch64_sme_usmopa_wide : SME_OuterProduct_Intrinsic; def int_aarch64_sme_usmops_wide : SME_OuterProduct_Intrinsic; + // FP8 outer product + def int_aarch64_sme_fp8_fmopa_za16 : 
SME_FP8_OuterProduct_Intrinsic; + def int_aarch64_sme_fp8_fmopa_za32 : SME_FP8_OuterProduct_Intrinsic; + class SME_AddVectorToTile_Intrinsic : DefaultAttrsIntrinsic<[], [llvm_i32_ty, diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td index 37ac915d1d8808..9c657787d3492b 100644 --- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td @@ -990,7 +990,7 @@ defm FDOT_VG2_M2ZZI_BtoH : sme2p1_multi_vec_array_vg2_index_f8f16<"fdot", 0b11 defm FDOT_VG4_M4ZZI_BtoH : sme2p1_multi_vec_array_vg4_index_f8f16<"fdot", 0b100, ZZZZ_b_mul_r, ZPR4b8>; defm FDOT_VG2_M2ZZ_BtoH : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0010001, MatrixOp16, ZZ_b, ZPR4b8>; defm FDOT_VG4_M4ZZ_BtoH : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0110001, MatrixOp16, ZZZZ_b, ZPR4b8>; -// TODO: Replace nxv16i8 by nxv16f8 + defm FDOT_VG2_M2Z2Z_BtoH : sme2_dot_mla_add_sub_array_vg2_multi<"fdot", 0b0100100, MatrixOp16, ZZ_b_mul_r, nxv16i8, null_frag>; defm FDOT_VG4_M4Z4Z_BtoH : sme2_dot_mla_add_sub_array_vg4_multi<"fdot", 0b0100100, MatrixOp16, ZZZZ_b_mul_r, nxv16i8, null_frag>; @@ -998,23 +998,22 @@ def FMLAL_MZZI_BtoH : sme2_mla_ll_array_index_16b<"fmlal", 0b11, 0b00>; defm FMLAL_VG2_M2ZZI_BtoH : sme2_multi_vec_array_vg2_index_16b<"fmlal", 0b10, 0b111>; defm FMLAL_VG4_M4ZZI_BtoH : sme2_multi_vec_array_vg4_index_16b<"fmlal", 0b10, 0b110>; def FMLAL_VG2_MZZ_BtoH : sme2_mla_long_array_single_16b<"fmlal">; -// TODO: Replace nxv16i8 by nxv16f8 + defm FMLAL_VG2_M2ZZ_BtoH : sme2_fp_mla_long_array_vg2_single<"fmlal", 0b001, MatrixOp16, ZZ_b, ZPR4b8, nxv16i8, null_frag>; defm FMLAL_VG4_M4ZZ_BtoH : sme2_fp_mla_long_array_vg4_single<"fmlal", 0b001, MatrixOp16, ZZZZ_b, ZPR4b8, nxv16i8, null_frag>; defm FMLAL_VG2_M2Z2Z_BtoH : sme2_fp_mla_long_array_vg2_multi<"fmlal", 0b100, MatrixOp16, ZZ_b_mul_r, nxv16i8, null_frag>; defm FMLAL_VG4_M4Z4Z_BtoH : sme2_fp_mla_long_array_vg4_multi<"fmlal", 0b100, MatrixOp16, ZZZZ_b_mul_r, nxv16i8, null_frag>; -defm FMOPA_MPPZZ_BtoH : sme2p1_fmop_tile_f8f16<"fmopa", 0b1, 0b0, 0b01>; - +defm FMOPA_MPPZZ_BtoH : sme2_fp8_fmopa_za16<"fmopa", int_aarch64_sme_fp8_fmopa_za16>; } //[HasSMEF8F16] let Predicates = [HasSMEF8F32] in { -// TODO : Replace nxv16i8 by nxv16f8 + defm FDOT_VG2_M2ZZI_BtoS : sme2_multi_vec_array_vg2_index_32b<"fdot", 0b01, 0b0111, ZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>; defm FDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"fdot", 0b0001, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>; defm FDOT_VG2_M2ZZ_BtoS : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0010011, MatrixOp32, ZZ_b, ZPR4b8>; defm FDOT_VG4_M4ZZ_BtoS : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0110011, MatrixOp32, ZZZZ_b, ZPR4b8>; -// TODO : Replace nxv16i8 by nxv16f8 + defm FDOT_VG2_M2Z2Z_BtoS : sme2_dot_mla_add_sub_array_vg2_multi<"fdot", 0b0100110, MatrixOp32, ZZ_b_mul_r, nxv16i8, null_frag>; defm FDOT_VG4_M4Z4Z_BtoS : sme2_dot_mla_add_sub_array_vg4_multi<"fdot", 0b0100110, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, null_frag>; @@ -1024,16 +1023,14 @@ def FVDOTT_VG4_M2ZZI_BtoS : sme2_fp8_multi_vec_array_vg4_index<"fvdott", 0b1>; defm FMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"fmlall", 0b01, 0b000, null_frag>; defm FMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"fmlall", 0b10, 0b100, null_frag>; defm FMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"fmlall", 0b00, 0b1000, null_frag>; -// TODO: Replace nxv16i8 by nxv16f8 + defm FMLALL_MZZ_BtoS : sme2_mla_ll_array_single<"fmlall", 0b01000, 
MatrixOp32, ZPR8, ZPR4b8, nxv16i8, null_frag>; defm FMLALL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg24_single<"fmlall", 0b000001, MatrixOp32, ZZ_b, ZPR4b8>; defm FMLALL_VG4_M4ZZ_BtoS : sme2_mla_ll_array_vg24_single<"fmlall", 0b010001, MatrixOp32, ZZZZ_b, ZPR4b8>; defm FMLALL_VG2_M2Z2Z_BtoS : sme2_mla_ll_array_vg2_multi<"fmlall", 0b01000, MatrixOp32, ZZ_b_mul_r, nxv16i8, null_frag>; defm FMLALL_VG4_M4Z4Z_BtoS : sme2_mla_ll_array_vg4_multi<"fmlall", 0b01000, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, null_frag>; - -defm FMOPA_MPPZZ_BtoS : sme_outer_product_fp32<0b0, 0b01, ZPR8, "fmopa", null_frag>; - +defm FMOPA_MPPZZ_BtoS : sme2_fp8_fmopa_za32<"fmopa", int_aarch64_sme_fp8_fmopa_za32>; } //[HasSMEF8F32] let Predicates = [HasSME2, HasSVEBFSCALE] in { diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td index 776472e72af05a..e6535f957e2024 100644 --- a/llvm/lib/Target/AArch64/SMEInstrFormats.td +++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td @@ -305,6 +305,21 @@ multiclass sme_outer_product_fp32<bit S, bits<2> sz, ZPRRegOp zpr_ty, string mne def : SME_ZA_Tile_TwoPred_TwoVec_Pat<NAME, op, timm32_0_3, nxv4i1, nxv4f32>; } +multiclass sme2_fp8_fmopa_za32<string mnemonic, SDPatternOperator intrinsic> { + def NAME : sme_fp_outer_product_inst<0, 0b01, 0b00, TileOp32, ZPR8, mnemonic>, SMEPseudo2Instr<NAME, 1> { + bits<2> ZAda; + let Inst{1-0} = ZAda; + let Inst{2} = 0b0; + + let Uses = [FPMR, FPCR]; + } + + let mayStore = 1, mayLoad = 1 in + def NAME # _PSEUDO : sme_outer_product_pseudo<ZPR8, SMEMatrixTileS>, SMEPseudo2Instr<NAME, 0>; + + def : SME_ZA_Tile_TwoPred_TwoVec_Pat<NAME, intrinsic, timm32_0_3, nxv16i1, nxv16i8>; +} + multiclass sme_outer_product_fp64<bit S, string mnemonic, SDPatternOperator op> { def NAME : sme_fp_outer_product_inst<S, 0b10, 0b00, TileOp64, ZPR64, mnemonic>, SMEPseudo2Instr<NAME, 1> { bits<3> ZAda; @@ -316,12 +331,19 @@ multiclass sme_outer_product_fp64<bit S, string mnemonic, SDPatternOperator op> def : SME_ZA_Tile_TwoPred_TwoVec_Pat<NAME, op, timm32_0_7, nxv2i1, nxv2f64>; } -multiclass sme2p1_fmop_tile_f8f16<string mnemonic, bit bf, bit s, bits<2> op> { - def NAME : sme_fp_outer_product_inst<s, {0,bf}, op, TileOp16, ZPR8, mnemonic> { +multiclass sme2_fp8_fmopa_za16<string mnemonic, SDPatternOperator intrinsic> { + def NAME : sme_fp_outer_product_inst<0, {0, 0b1}, 0b01, TileOp16, ZPR8, mnemonic>, SMEPseudo2Instr<NAME, 1> { bits<1> ZAda; let Inst{2-1} = 0b00; let Inst{0} = ZAda; + + let Uses = [FPMR, FPCR]; } + + let mayStore = 1, mayLoad = 1 in + def NAME # _PSEUDO : sme_outer_product_pseudo<ZPR8, SMEMatrixTileH>, SMEPseudo2Instr<NAME, 0>; + + def : SME_ZA_Tile_TwoPred_TwoVec_Pat<NAME, intrinsic, timm32_0_1, nxv16i1, nxv16i8>; } multiclass sme2p1_fmop_tile_fp16<string mnemonic, bit bf, bit s, ValueType vt, SDPatternOperator intrinsic = null_frag> { diff --git a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-fmopa.ll b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-fmopa.ll new file mode 100644 index 00000000000000..6e88cdf4e7fec3 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-fmopa.ll @@ -0,0 +1,22 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme-f8f16,+sme-f8f32 -force-streaming < %s | FileCheck %s + +define void @test_fmopa_16(<vscale x 16 x i1> %pn, <vscale x 16 x i1> %pm, <vscale x 16 x i8> %vn, <vscale x 16 x i8> %vm) { +; CHECK-LABEL: test_fmopa_16: +; CHECK: // %bb.0: +; CHECK-NEXT: fmopa za1.h, p0/m, p1/m, 
z0.b, z1.b +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.fp8.fmopa.za16(i32 1, <vscale x 16 x i1> %pn, <vscale x 16 x i1> %pm, + <vscale x 16 x i8> %vn, <vscale x 16 x i8> %vm) + ret void +} + +define void @test_fmopa_32(<vscale x 16 x i1> %pn, <vscale x 16 x i1> %pm, <vscale x 16 x i8> %vn, <vscale x 16 x i8> %vm) #0 { +; CHECK-LABEL: test_fmopa_32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmopa za3.s, p0/m, p1/m, z0.b, z1.b +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.fp8.fmopa.za32(i32 3, <vscale x 16 x i1> %pn, <vscale x 16 x i1> %pm, + <vscale x 16 x i8> %vn, <vscale x 16 x i8> %vm) + ret void +} >From e53d2b794a0de817bcd94b0049c4eb12904180b9 Mon Sep 17 00:00:00 2001 From: Spencer Abson <spencer.ab...@arm.com> Date: Tue, 3 Dec 2024 18:27:06 +0000 Subject: [PATCH 2/3] [AArch64] Implement intrinsics for SME FP8 FMLAL/FMLALL --- clang/include/clang/Basic/arm_sme.td | 16 ++ .../fp8-intrinsics/acle_sme2_fp8_mla.c | 128 +++++++++++ .../acle_sme2_fp8_imm.c | 34 +++ .../aarch64-fp8-intrinsics/acle_sme_fp8_mla.c | 26 +++ llvm/include/llvm/IR/IntrinsicsAArch64.td | 63 ++++-- .../lib/Target/AArch64/AArch64SMEInstrInfo.td | 45 ++-- llvm/lib/Target/AArch64/SMEInstrFormats.td | 211 +++++++++--------- .../AArch64/sme2-fp8-intrinsics-mla.ll | 116 ++++++++++ 8 files changed, 496 insertions(+), 143 deletions(-) create mode 100644 clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_mla.c create mode 100644 clang/test/Sema/aarch64-fp8-intrinsics/acle_sme_fp8_mla.c create mode 100644 llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-mla.ll diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td index 71b2c7cdd04f93..d3fb9de0b71070 100644 --- a/clang/include/clang/Basic/arm_sme.td +++ b/clang/include/clang/Basic/arm_sme.td @@ -827,11 +827,27 @@ let SMETargetGuard = "sme-lutv2" in { let SMETargetGuard = "sme-f8f32" in { def SVMOPA_FP8_ZA32 : Inst<"svmopa_za32[_mf8]_m_fpm", "viPPdd>", "m", MergeNone, "aarch64_sme_fp8_fmopa_za32", [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<0, ImmCheck0_3>]>; + + // FMLALL (indexed) + def SVMLA_FP8_ZA32_VG4x1 : Inst<"svmla_lane_za32[_mf8]_vg4x1_fpm", "vmddi>", "m", MergeNone, "aarch64_sme_fp8_fmlall_lane_za32_vg4x1", + [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>; + def SVMLA_FP8_ZA32_VG4x2 : Inst<"svmla_lane_za32[_mf8]_vg4x2_fpm", "vm2di>", "m", MergeNone, "aarch64_sme_fp8_fmlall_lane_za32_vg4x2", + [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>; + def SVMLA_FP8_ZA16_VG4x4 : Inst<"svmla_lane_za32[_mf8]_vg4x4_fpm", "vm4di>", "m", MergeNone, "aarch64_sme_fp8_fmlall_lane_za32_vg4x4", + [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>; } let SMETargetGuard = "sme-f8f16" in { def SVMOPA_FP8_ZA16 : Inst<"svmopa_za16[_mf8]_m_fpm", "viPPdd>", "m", MergeNone, "aarch64_sme_fp8_fmopa_za16", [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<0, ImmCheck0_1>]>; + + // FMLAL (indexed) + def SVMLA_FP8_ZA16_VG2x1 : Inst<"svmla_lane_za16[_mf8]_vg2x1_fpm", "vmddi>", "m", MergeNone, "aarch64_sme_fp8_fmlal_lane_za16_vg2x1", + [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>; + def SVMLA_FP8_ZA16_VG2x2 : Inst<"svmla_lane_za16[_mf8]_vg2x2_fpm", "vm2di>", "m", MergeNone, "aarch64_sme_fp8_fmlal_lane_za16_vg2x2", + [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>; + def SVMLA_FP8_ZA16_VG2x4 : Inst<"svmla_lane_za16[_mf8]_vg2x4_fpm", "vm4di>", "m", MergeNone, 
"aarch64_sme_fp8_fmlal_lane_za16_vg2x4", + [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_15>]>; } } // let SVETargetGuard = InvalidMode diff --git a/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_mla.c b/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_mla.c new file mode 100644 index 00000000000000..dc8da997f7aeb3 --- /dev/null +++ b/clang/test/CodeGen/AArch64/fp8-intrinsics/acle_sme2_fp8_mla.c @@ -0,0 +1,128 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: aarch64-registered-target + +// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -DSME_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s +// RUN: %clang_cc1 -DSME_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK +// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme-f8f16 -target-feature +sme-f8f32 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s + +#include <arm_sme.h> + +#ifdef SME_OVERLOADED_FORMS +#define SME_ACLE_FUNC(A1,A2_UNUSED,A3) A1##A3 +#else +#define SME_ACLE_FUNC(A1,A2,A3) A1##A2##A3 +#endif + +// FMLAL (indexed) + +// CHECK-LABEL: define dso_local void @test_svmla_lane_za16_vg2x1( +// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 [[SLICE]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0) +// CHECK-NEXT: ret void +// +// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za16_vg2x1ju13__SVMfloat8_tS_m( +// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0:[0-9]+]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 [[SLICE]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0) +// CPP-CHECK-NEXT: ret void +// +void test_svmla_lane_za16_vg2x1(uint32_t slice, svmfloat8_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") { + SME_ACLE_FUNC(svmla_lane_za16,_mf8,_vg2x1_fpm)(slice, zn, zm, 0, fpm); +} + +// CHECK-LABEL: define dso_local void @test_svmla_lane_za16_vg2x2( +// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { 
+// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 15) +// CHECK-NEXT: ret void +// +// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za16_vg2x2j13svmfloat8x2_tu13__SVMfloat8_tm( +// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 15) +// CPP-CHECK-NEXT: ret void +// +void test_svmla_lane_za16_vg2x2(uint32_t slice, svmfloat8x2_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") { + SME_ACLE_FUNC(svmla_lane_za16,_mf8,_vg2x2_fpm)(slice, zn, zm, 15, fpm); +} + +// CHECK-LABEL: define dso_local void @test_svmla_lane_za16_vg2x4( +// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 7) +// CHECK-NEXT: ret void +// +// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za16_vg2x4j13svmfloat8x4_tu13__SVMfloat8_tm( +// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 7) +// CPP-CHECK-NEXT: ret void +// +void test_svmla_lane_za16_vg2x4(uint32_t slice, svmfloat8x4_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") { + SME_ACLE_FUNC(svmla_lane_za16,_mf8,_vg2x4_fpm)(slice, zn, zm, 7, fpm); +} + +// FMLALL (indexed) + +// CHECK-LABEL: define dso_local void @test_svmla_lane_za32_vg4x1( +// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x1(i32 [[SLICE]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0) +// CHECK-NEXT: ret void +// +// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za32_vg4x1ju13__SVMfloat8_tS_m( +// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale 
x 16 x i8> [[ZN:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x1(i32 [[SLICE]], <vscale x 16 x i8> [[ZN]], <vscale x 16 x i8> [[ZM]], i32 0) +// CPP-CHECK-NEXT: ret void +// +void test_svmla_lane_za32_vg4x1(uint32_t slice, svmfloat8_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") { + SME_ACLE_FUNC(svmla_lane_za32,_mf8,_vg4x1_fpm)(slice, zn, zm, 0, fpm); +} + +// CHECK-LABEL: define dso_local void @test_svmla_lane_za32_vg4x2( +// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 15) +// CHECK-NEXT: ret void +// +// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za32_vg4x2j13svmfloat8x2_tu13__SVMfloat8_tm( +// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 15) +// CPP-CHECK-NEXT: ret void +// +void test_svmla_lane_za32_vg4x2(uint32_t slice, svmfloat8x2_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") { + SME_ACLE_FUNC(svmla_lane_za32,_mf8,_vg4x2_fpm)(slice, zn, zm, 15, fpm); +} + +// CHECK-LABEL: define dso_local void @test_svmla_lane_za32_vg4x4( +// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 7) +// CHECK-NEXT: ret void +// +// CPP-CHECK-LABEL: define dso_local void @_Z26test_svmla_lane_za32_vg4x4j13svmfloat8x4_tu13__SVMfloat8_tm( +// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPM:%.*]]) #[[ATTR0]] { +// CPP-CHECK-NEXT: [[ENTRY:.*:]] +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPM]]) +// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 7) +// CPP-CHECK-NEXT: ret void +// +void 
test_svmla_lane_za32_vg4x4(uint32_t slice, svmfloat8x4_t zn, svmfloat8_t zm, fpm_t fpm) __arm_streaming __arm_inout("za") { + SME_ACLE_FUNC(svmla_lane_za32,_mf8,_vg4x4_fpm)(slice, zn, zm, 7, fpm); +} diff --git a/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c index 62cad9cfa4c8fd..c60b179a43e15d 100644 --- a/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c +++ b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme2_fp8_imm.c @@ -16,3 +16,37 @@ void test_svmopa(svbool_t pn, svbool_t pm, svmfloat8_t zn, svmfloat8_t zm, // expected-error@+1 {{argument value 4 is outside the valid range [0, 3]}} svmopa_za32_mf8_m_fpm(4, pn, pm, zn, zm, fpmr); } + +void test_svmla(uint32_t slice, svmfloat8_t zn, svmfloat8x2_t znx2, svmfloat8x4_t znx4, + fpm_t fpmr) __arm_streaming __arm_inout("za") { + // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}} + svmla_lane_za16_mf8_vg2x1_fpm(slice, zn, zn, -1, fpmr); + // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}} + svmla_lane_za16_mf8_vg2x1_fpm(slice, zn, zn, 16, fpmr); + + // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}} + svmla_lane_za16_mf8_vg2x2_fpm(slice, znx2, zn, -1, fpmr); + // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}} + svmla_lane_za16_mf8_vg2x2_fpm(slice, znx2, zn, 16, fpmr); + + // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}} + svmla_lane_za16_mf8_vg2x4_fpm(slice, znx4, zn, -1, fpmr); + // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}} + svmla_lane_za16_mf8_vg2x4_fpm(slice, znx4, zn, 16, fpmr); + + // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}} + svmla_lane_za32_mf8_vg4x1_fpm(slice, zn, zn, -1, fpmr); + // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}} + svmla_lane_za32_mf8_vg4x1_fpm(slice, zn, zn, 16, fpmr); + + // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}} + svmla_lane_za32_mf8_vg4x2_fpm(slice, znx2, zn, -1, fpmr); + // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}} + svmla_lane_za32_mf8_vg4x2_fpm(slice, znx2, zn, 16, fpmr); + + // expected-error@+1 {{argument value 18446744073709551615 is outside the valid range [0, 15]}} + svmla_lane_za32_mf8_vg4x4_fpm(slice, znx4, zn, -1, fpmr); + // expected-error@+1 {{argument value 16 is outside the valid range [0, 15]}} + svmla_lane_za32_mf8_vg4x4_fpm(slice, znx4, zn, 16, fpmr); + +} diff --git a/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme_fp8_mla.c b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme_fp8_mla.c new file mode 100644 index 00000000000000..25255f1f469b2b --- /dev/null +++ b/clang/test/Sema/aarch64-fp8-intrinsics/acle_sme_fp8_mla.c @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -verify -emit-llvm-only %s + +// REQUIRES: aarch64-registered-target + +#include <arm_sme.h> + +void test_svmla(uint32_t slice, svmfloat8_t zn, svmfloat8x2_t znx2, svmfloat8x4_t znx4, + fpm_t fpmr) __arm_streaming __arm_inout("za") { + // expected-error@+1 {{'svmla_lane_za16_mf8_vg2x1_fpm' needs target feature sme,sme-f8f16}} + svmla_lane_za16_mf8_vg2x1_fpm(slice, zn, zn, 0, fpmr); + + // expected-error@+1 {{'svmla_lane_za16_mf8_vg2x2_fpm' needs target feature sme,sme-f8f16}} + svmla_lane_za16_mf8_vg2x2_fpm(slice, znx2, zn, 0, fpmr); + + // 
expected-error@+1 {{'svmla_lane_za16_mf8_vg2x4_fpm' needs target feature sme,sme-f8f16}} + svmla_lane_za16_mf8_vg2x4_fpm(slice, znx4, zn, 0, fpmr); + + // expected-error@+1 {{'svmla_lane_za32_mf8_vg4x1_fpm' needs target feature sme,sme-f8f32}} + svmla_lane_za32_mf8_vg4x1_fpm(slice, zn, zn, 0, fpmr); + + // expected-error@+1 {{'svmla_lane_za32_mf8_vg4x2_fpm' needs target feature sme,sme-f8f32}} + svmla_lane_za32_mf8_vg4x2_fpm(slice, znx2, zn, 0, fpmr); + + // expected-error@+1 {{'svmla_lane_za32_mf8_vg4x4_fpm' needs target feature sme,sme-f8f32}} + svmla_lane_za32_mf8_vg4x4_fpm(slice, znx4, zn, 0, fpmr); +} \ No newline at end of file diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td index 8597f029c116da..5236c858863ac9 100644 --- a/llvm/include/llvm/IR/IntrinsicsAArch64.td +++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td @@ -3871,19 +3871,52 @@ def int_aarch64_neon_famin : AdvSIMD_2VectorArg_Intrinsic; // let TargetPrefix = "aarch64" in { - class SME2_FP8_CVT_X2_Single_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], - [llvm_nxv16i8_ty], - [IntrReadMem, IntrInaccessibleMemOnly]>; - // - // CVT from FP8 to half-precision/BFloat16 multi-vector - // - def int_aarch64_sve_fp8_cvt1_x2 : SME2_FP8_CVT_X2_Single_Intrinsic; - def int_aarch64_sve_fp8_cvt2_x2 : SME2_FP8_CVT_X2_Single_Intrinsic; - - // - // CVT from FP8 to deinterleaved half-precision/BFloat16 multi-vector - // - def int_aarch64_sve_fp8_cvtl1_x2 : SME2_FP8_CVT_X2_Single_Intrinsic; - def int_aarch64_sve_fp8_cvtl2_x2 : SME2_FP8_CVT_X2_Single_Intrinsic; +class SME2_FP8_CVT_X2_Single_Intrinsic + : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], + [llvm_nxv16i8_ty], + [IntrReadMem, IntrInaccessibleMemOnly]>; + +class SME_FP8_ZA_LANE_VGx1 : + DefaultAttrsIntrinsic<[], [llvm_i32_ty, + llvm_nxv16i8_ty, + llvm_nxv16i8_ty, + llvm_i32_ty], + [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<3>>]>; + +class SME_FP8_ZA_LANE_VGx2 : + DefaultAttrsIntrinsic<[], [llvm_i32_ty, + llvm_nxv16i8_ty, llvm_nxv16i8_ty, + llvm_nxv16i8_ty, + llvm_i32_ty], + [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<4>>]>; + +class SME_FP8_ZA_LANE_VGx4 : + DefaultAttrsIntrinsic<[], [llvm_i32_ty, + llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty, + llvm_nxv16i8_ty, + llvm_i32_ty], + [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<6>>]>; + + // + // CVT from FP8 to half-precision/BFloat16 multi-vector + // + def int_aarch64_sve_fp8_cvt1_x2 : SME2_FP8_CVT_X2_Single_Intrinsic; + def int_aarch64_sve_fp8_cvt2_x2 : SME2_FP8_CVT_X2_Single_Intrinsic; + + // + // CVT from FP8 to deinterleaved half-precision/BFloat16 multi-vector + // + def int_aarch64_sve_fp8_cvtl1_x2 : SME2_FP8_CVT_X2_Single_Intrinsic; + def int_aarch64_sve_fp8_cvtl2_x2 : SME2_FP8_CVT_X2_Single_Intrinsic; + + // ZA multiply-add + // Double-vector groups (F8F16) + def int_aarch64_sme_fp8_fmlal_lane_za16_vg2x1 : SME_FP8_ZA_LANE_VGx1; + def int_aarch64_sme_fp8_fmlal_lane_za16_vg2x2 : SME_FP8_ZA_LANE_VGx2; + def int_aarch64_sme_fp8_fmlal_lane_za16_vg2x4 : SME_FP8_ZA_LANE_VGx4; + + // Quad-vector groups (F8F32) + def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1 : SME_FP8_ZA_LANE_VGx1; + def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2 : SME_FP8_ZA_LANE_VGx2; + def int_aarch64_sme_fp8_fmlall_lane_za32_vg4x4 : SME_FP8_ZA_LANE_VGx4; } \ No newline at end of file diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td index 
9c657787d3492b..e189cbb789b288 100644 --- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td @@ -339,43 +339,43 @@ defm SQDMULH_VG4_4ZZ : sme2_int_sve_destructive_vector_vg4_single<"sqdmulh", 0b1 defm SQDMULH_VG2_2Z2Z : sme2_int_sve_destructive_vector_vg2_multi<"sqdmulh", 0b1000000>; defm SQDMULH_VG4_4Z4Z : sme2_int_sve_destructive_vector_vg4_multi<"sqdmulh", 0b1000000>; -defm FMLAL_MZZI : sme2_mla_long_array_index<"fmlal", 0b10, 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x1>; -defm FMLAL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index<"fmlal", 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x2>; -defm FMLAL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index<"fmlal", 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x4>; +defm FMLAL_MZZI : sme2_mla_long_array_index_za32<"fmlal", 0b10, 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x1>; +defm FMLAL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index_za32<"fmlal", 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x2>; +defm FMLAL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index_za32<"fmlal", 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x4>; defm FMLAL_MZZ : sme2_mla_long_array_single<"fmlal", 0b00, 0b00, nxv8f16, int_aarch64_sme_fmlal_single_vg2x1>; defm FMLAL_VG2_M2ZZ_HtoS : sme2_fp_mla_long_array_vg2_single<"fmlal", 0b000, MatrixOp32, ZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fmlal_single_vg2x2>; defm FMLAL_VG4_M4ZZ_HtoS : sme2_fp_mla_long_array_vg4_single<"fmlal", 0b000, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fmlal_single_vg2x4>; defm FMLAL_VG2_M2Z2Z_HtoS : sme2_fp_mla_long_array_vg2_multi<"fmlal", 0b000, MatrixOp32, ZZ_h_mul_r, nxv8f16, int_aarch64_sme_fmlal_vg2x2>; defm FMLAL_VG4_M4Z4Z_HtoS : sme2_fp_mla_long_array_vg4_multi<"fmlal", 0b000, MatrixOp32, ZZZZ_h_mul_r, nxv8f16, int_aarch64_sme_fmlal_vg2x4>; -defm FMLSL_MZZI : sme2_mla_long_array_index<"fmlsl", 0b10, 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x1>; -defm FMLSL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index<"fmlsl", 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x2>; -defm FMLSL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index<"fmlsl", 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x4>; +defm FMLSL_MZZI : sme2_mla_long_array_index_za32<"fmlsl", 0b10, 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x1>; +defm FMLSL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index_za32<"fmlsl", 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x2>; +defm FMLSL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index_za32<"fmlsl", 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x4>; defm FMLSL_MZZ : sme2_mla_long_array_single<"fmlsl", 0b00, 0b01, nxv8f16, int_aarch64_sme_fmlsl_single_vg2x1>; defm FMLSL_VG2_M2ZZ_HtoS : sme2_fp_mla_long_array_vg2_single<"fmlsl", 0b010, MatrixOp32, ZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fmlsl_single_vg2x2>; defm FMLSL_VG4_M4ZZ_HtoS : sme2_fp_mla_long_array_vg4_single<"fmlsl", 0b010, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fmlsl_single_vg2x4>; defm FMLSL_VG2_M2Z2Z_HtoS : sme2_fp_mla_long_array_vg2_multi<"fmlsl", 0b001, MatrixOp32, ZZ_h_mul_r, nxv8f16, int_aarch64_sme_fmlsl_vg2x2>; defm FMLSL_VG4_M4Z4Z_HtoS : sme2_fp_mla_long_array_vg4_multi<"fmlsl", 0b001, MatrixOp32, ZZZZ_h_mul_r, nxv8f16, int_aarch64_sme_fmlsl_vg2x4>; -defm BFMLAL_MZZI : sme2_mla_long_array_index<"bfmlal", 0b10, 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x1>; -defm BFMLAL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index<"bfmlal", 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x2>; -defm BFMLAL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index<"bfmlal", 0b10, nxv8bf16, 
int_aarch64_sme_fmlal_lane_vg2x4>; +defm BFMLAL_MZZI : sme2_mla_long_array_index_za32<"bfmlal", 0b10, 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x1>; +defm BFMLAL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index_za32<"bfmlal", 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x2>; +defm BFMLAL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index_za32<"bfmlal", 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x4>; defm BFMLAL_MZZ : sme2_mla_long_array_single<"bfmlal", 0b00, 0b10, nxv8bf16, int_aarch64_sme_fmlal_single_vg2x1>; defm BFMLAL_VG2_M2ZZ_HtoS : sme2_fp_mla_long_array_vg2_single<"bfmlal", 0b100, MatrixOp32, ZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fmlal_single_vg2x2>; defm BFMLAL_VG4_M4ZZ_HtoS : sme2_fp_mla_long_array_vg4_single<"bfmlal", 0b100, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fmlal_single_vg2x4>; defm BFMLAL_VG2_M2Z2Z_HtoS : sme2_fp_mla_long_array_vg2_multi<"bfmlal", 0b010, MatrixOp32, ZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmlal_vg2x2>; defm BFMLAL_VG4_M4Z4Z_HtoS : sme2_fp_mla_long_array_vg4_multi<"bfmlal", 0b010, MatrixOp32, ZZZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmlal_vg2x4>; -defm BFMLSL_MZZI : sme2_mla_long_array_index<"bfmlsl", 0b10, 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x1>; -defm BFMLSL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index<"bfmlsl", 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x2>; -defm BFMLSL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index<"bfmlsl", 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x4>; +defm BFMLSL_MZZI : sme2_mla_long_array_index_za32<"bfmlsl", 0b10, 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x1>; +defm BFMLSL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index_za32<"bfmlsl", 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x2>; +defm BFMLSL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index_za32<"bfmlsl", 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x4>; defm BFMLSL_MZZ : sme2_mla_long_array_single<"bfmlsl", 0b00, 0b11, nxv8bf16, int_aarch64_sme_fmlsl_single_vg2x1>; defm BFMLSL_VG2_M2ZZ_HtoS : sme2_fp_mla_long_array_vg2_single<"bfmlsl", 0b110, MatrixOp32, ZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fmlsl_single_vg2x2>; defm BFMLSL_VG4_M4ZZ_HtoS : sme2_fp_mla_long_array_vg4_single<"bfmlsl", 0b110, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fmlsl_single_vg2x4>; defm BFMLSL_VG2_M2Z2Z_HtoS : sme2_fp_mla_long_array_vg2_multi<"bfmlsl", 0b011, MatrixOp32, ZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmlsl_vg2x2>; defm BFMLSL_VG4_M4Z4Z_HtoS : sme2_fp_mla_long_array_vg4_multi<"bfmlsl", 0b011, MatrixOp32, ZZZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmlsl_vg2x4>; -defm SMLAL_MZZI : sme2_mla_long_array_index<"smlal", 0b11, 0b00, nxv8i16, int_aarch64_sme_smlal_lane_vg2x1>; +defm SMLAL_MZZI : sme2_mla_long_array_index_za32<"smlal", 0b11, 0b00, nxv8i16, int_aarch64_sme_smlal_lane_vg2x1>; defm SMLAL_VG2_M2ZZI : sme2_int_mla_long_array_vg2_index<"smlal", 0b00, int_aarch64_sme_smlal_lane_vg2x2>; defm SMLAL_VG4_M4ZZI : sme2_int_mla_long_array_vg4_index<"smlal", 0b00, int_aarch64_sme_smlal_lane_vg2x4>; defm SMLAL_MZZ : sme2_mla_long_array_single<"smlal",0b01, 0b00, nxv8i16, int_aarch64_sme_smlal_single_vg2x1>; @@ -384,7 +384,7 @@ defm SMLAL_VG4_M4ZZ : sme2_int_mla_long_array_vg4_single<"smlal", 0b00, int_aar defm SMLAL_VG2_M2Z2Z : sme2_int_mla_long_array_vg2_multi<"smlal", 0b00, int_aarch64_sme_smlal_vg2x2>; defm SMLAL_VG4_M4Z4Z : sme2_int_mla_long_array_vg4_multi<"smlal", 0b00, int_aarch64_sme_smlal_vg2x4>; -defm SMLSL_MZZI : sme2_mla_long_array_index<"smlsl", 0b11, 0b01, nxv8i16, int_aarch64_sme_smlsl_lane_vg2x1>; +defm SMLSL_MZZI : 
sme2_mla_long_array_index_za32<"smlsl", 0b11, 0b01, nxv8i16, int_aarch64_sme_smlsl_lane_vg2x1>; defm SMLSL_VG2_M2ZZI : sme2_int_mla_long_array_vg2_index<"smlsl", 0b01, int_aarch64_sme_smlsl_lane_vg2x2>; defm SMLSL_VG4_M4ZZI : sme2_int_mla_long_array_vg4_index<"smlsl", 0b01, int_aarch64_sme_smlsl_lane_vg2x4>; defm SMLSL_MZZ : sme2_mla_long_array_single<"smlsl",0b01, 0b01, nxv8i16, int_aarch64_sme_smlsl_single_vg2x1>; @@ -393,7 +393,7 @@ defm SMLSL_VG4_M4ZZ : sme2_int_mla_long_array_vg4_single<"smlsl", 0b01, int_aar defm SMLSL_VG2_M2Z2Z : sme2_int_mla_long_array_vg2_multi<"smlsl", 0b01, int_aarch64_sme_smlsl_vg2x2>; defm SMLSL_VG4_M4Z4Z : sme2_int_mla_long_array_vg4_multi<"smlsl", 0b01, int_aarch64_sme_smlsl_vg2x4>; -defm UMLAL_MZZI : sme2_mla_long_array_index<"umlal", 0b11, 0b10, nxv8i16, int_aarch64_sme_umlal_lane_vg2x1>; +defm UMLAL_MZZI : sme2_mla_long_array_index_za32<"umlal", 0b11, 0b10, nxv8i16, int_aarch64_sme_umlal_lane_vg2x1>; defm UMLAL_VG2_M2ZZI : sme2_int_mla_long_array_vg2_index<"umlal", 0b10, int_aarch64_sme_umlal_lane_vg2x2>; defm UMLAL_VG4_M4ZZI : sme2_int_mla_long_array_vg4_index<"umlal", 0b10, int_aarch64_sme_umlal_lane_vg2x4>; defm UMLAL_MZZ : sme2_mla_long_array_single<"umlal",0b01, 0b10, nxv8i16, int_aarch64_sme_umlal_single_vg2x1>; @@ -402,7 +402,7 @@ defm UMLAL_VG4_M4ZZ : sme2_int_mla_long_array_vg4_single<"umlal", 0b10, int_aar defm UMLAL_VG2_M2Z2Z : sme2_int_mla_long_array_vg2_multi<"umlal", 0b10, int_aarch64_sme_umlal_vg2x2>; defm UMLAL_VG4_M4Z4Z : sme2_int_mla_long_array_vg4_multi<"umlal", 0b10, int_aarch64_sme_umlal_vg2x4>; -defm UMLSL_MZZI : sme2_mla_long_array_index<"umlsl", 0b11, 0b11, nxv8i16, int_aarch64_sme_umlsl_lane_vg2x1>; +defm UMLSL_MZZI : sme2_mla_long_array_index_za32<"umlsl", 0b11, 0b11, nxv8i16, int_aarch64_sme_umlsl_lane_vg2x1>; defm UMLSL_VG2_M2ZZI : sme2_int_mla_long_array_vg2_index<"umlsl", 0b11, int_aarch64_sme_umlsl_lane_vg2x2>; defm UMLSL_VG4_M4ZZI : sme2_int_mla_long_array_vg4_index<"umlsl", 0b11, int_aarch64_sme_umlsl_lane_vg2x4>; defm UMLSL_MZZ : sme2_mla_long_array_single<"umlsl",0b01, 0b11, nxv8i16, int_aarch64_sme_umlsl_single_vg2x1>; @@ -994,9 +994,10 @@ defm FDOT_VG4_M4ZZ_BtoH : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b01 defm FDOT_VG2_M2Z2Z_BtoH : sme2_dot_mla_add_sub_array_vg2_multi<"fdot", 0b0100100, MatrixOp16, ZZ_b_mul_r, nxv16i8, null_frag>; defm FDOT_VG4_M4Z4Z_BtoH : sme2_dot_mla_add_sub_array_vg4_multi<"fdot", 0b0100100, MatrixOp16, ZZZZ_b_mul_r, nxv16i8, null_frag>; -def FMLAL_MZZI_BtoH : sme2_mla_ll_array_index_16b<"fmlal", 0b11, 0b00>; -defm FMLAL_VG2_M2ZZI_BtoH : sme2_multi_vec_array_vg2_index_16b<"fmlal", 0b10, 0b111>; -defm FMLAL_VG4_M4ZZI_BtoH : sme2_multi_vec_array_vg4_index_16b<"fmlal", 0b10, 0b110>; +defm FMLAL_MZZI_BtoH : sme2_mla_long_array_index_za16<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x1>; +defm FMLAL_VG2_M2ZZI_BtoH : sme2_mla_long_array_vg2_index_za16<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x2>; +defm FMLAL_VG4_M4ZZI_BtoH : sme2_mla_long_array_vg4_index_za16<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x4>; + def FMLAL_VG2_MZZ_BtoH : sme2_mla_long_array_single_16b<"fmlal">; defm FMLAL_VG2_M2ZZ_BtoH : sme2_fp_mla_long_array_vg2_single<"fmlal", 0b001, MatrixOp16, ZZ_b, ZPR4b8, nxv16i8, null_frag>; @@ -1020,9 +1021,9 @@ defm FDOT_VG4_M4Z4Z_BtoS : sme2_dot_mla_add_sub_array_vg4_multi<"fdot", 0b0100 def FVDOTB_VG4_M2ZZI_BtoS : sme2_fp8_multi_vec_array_vg4_index<"fvdotb", 0b0>; def FVDOTT_VG4_M2ZZI_BtoS : sme2_fp8_multi_vec_array_vg4_index<"fvdott", 0b1>; -defm FMLALL_MZZI_BtoS : 
sme2_mla_ll_array_index_32b<"fmlall", 0b01, 0b000, null_frag>; -defm FMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"fmlall", 0b10, 0b100, null_frag>; -defm FMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"fmlall", 0b00, 0b1000, null_frag>; +defm FMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"fmlall", 0b01, 0b000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1>; +defm FMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"fmlall", 0b10, 0b100, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2>; +defm FMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"fmlall", 0b00, 0b1000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x4>; defm FMLALL_MZZ_BtoS : sme2_mla_ll_array_single<"fmlall", 0b01000, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, null_frag>; defm FMLALL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg24_single<"fmlall", 0b000001, MatrixOp32, ZZ_b, ZPR4b8>; diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td index e6535f957e2024..7723caf78500a4 100644 --- a/llvm/lib/Target/AArch64/SMEInstrFormats.td +++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td @@ -1972,12 +1972,12 @@ multiclass sme2p1_bf_max_min_vector_vg4_multi<string mnemonic, bits<7>op> { //===----------------------------------------------------------------------===// // SME2 Multi-vector - Index/Single/Multi Array Vectors FMA sources -class sme2_mla_long_array_index_base<bits<2> op0, bits<2> op, Operand index_ty, - RegisterOperand multi_vector_ty, - string mnemonic, string vg_acronym=""> - : I<(outs MatrixOp32:$ZAda), - (ins MatrixOp32:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, index_ty:$imm, multi_vector_ty:$Zn, ZPR4b16:$Zm, VectorIndexH32b_timm:$i3), - mnemonic, "\t$ZAda[$Rv, $imm" # !if(!eq(vg_acronym, ""), "", ", " # vg_acronym) # "], $Zn, $Zm$i3", +class sme2_mla_long_array_index_base<bits<2> op0, MatrixOperand mat_ty, Operand slice_ty, + RegisterOperand multi_vector_ty, RegisterOperand vector_ty, + Operand index_ty, string mnemonic, string vg_acronym=""> + : I<(outs mat_ty:$ZAda), + (ins mat_ty:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, slice_ty:$imm, multi_vector_ty:$Zn, vector_ty:$Zm, index_ty:$vi), + mnemonic, "\t$ZAda[$Rv, $imm" # !if(!eq(vg_acronym, ""), "", ", " # vg_acronym) # "], $Zn, $Zm$vi", "", []>, Sched<[]> { bits<4> Zm; bits<2> Rv; @@ -1987,21 +1987,91 @@ class sme2_mla_long_array_index_base<bits<2> op0, bits<2> op, Operand index_ty, let Inst{20} = !if(!eq(vg_acronym, ""), 0, 1); let Inst{19-16} = Zm; let Inst{14-13} = Rv; - let Inst{12} = 0b1; - let Inst{4-3} = op; let Constraints = "$ZAda = $_ZAda"; } -multiclass sme2_mla_long_array_index<string mnemonic, bits<2> op0, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> { - def _HtoS : sme2_mla_long_array_index_base<op0, op, uimm3s2range, ZPR16, - mnemonic>, SMEPseudo2Instr<NAME # _HtoS, 1> { - bits<3> i3; +multiclass sme2_mla_long_array_index_za16<string mnemonic, SDPatternOperator intrinsic> { + def : sme2_mla_long_array_index_base<0b11, MatrixOp16, uimm3s2range, ZPR8, ZPR4b8, VectorIndexB32b_timm, + mnemonic>, SMEPseudo2Instr<NAME, 1> { + bits<4> vi; + bits<3> imm; + bits<5> Zn; + let Inst{15} = vi{3}; + let Inst{12} = 0b0; + let Inst{11-10} = vi{2-1}; + let Inst{9-5} = Zn; + let Inst{4} = 0b0; + let Inst{3} = vi{0}; + let Inst{2-0} = imm; + } + + def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm3s2range, ZPR8, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>; + + def : SME2_ZA_TwoOp_Multi_Index_Pat<NAME, intrinsic, uimm3s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange3s2>; +} + +multiclass 
sme2_mla_long_array_vg2_index_za16<string mnemonic, SDPatternOperator intrinsic> {
+
+  def NAME : sme2_mla_long_array_index_base<0b10, MatrixOp16, uimm2s2range, ZZ_b_mul_r, ZPR4b8,
+                                            VectorIndexB32b_timm, mnemonic, "vgx2">, SMEPseudo2Instr<NAME, 1> {
+    bits<4> vi;
+    bits<2> imm;
+    bits<4> Zn;
+    let Inst{15} = 0b0;
+    let Inst{12} = 0b1;
+    let Inst{11-10} = vi{3-2};
+    let Inst{9-6} = Zn;
+    let Inst{5-4} = 0b11;
+    let Inst{3-2} = vi{1-0};
+    let Inst{1-0} = imm;
+  }
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s2range, ZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat<NAME, intrinsic, uimm2s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s2>;
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$vimm",
+                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm,
+                  ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$vimm), 0>;
+}
+
+multiclass sme2_mla_long_array_vg4_index_za16<string mnemonic, SDPatternOperator intrinsic> {
+
+  def NAME : sme2_mla_long_array_index_base<0b10, MatrixOp16, uimm2s2range, ZZZZ_b_mul_r, ZPR4b8,
+                                            VectorIndexB32b_timm, mnemonic, "vgx4">, SMEPseudo2Instr<NAME, 1> {
+    bits<4> vi;
+    bits<2> imm;
+    bits<3> Zn;
+    let Inst{15} = 0b1;
+    let Inst{12} = 0b1;
+    let Inst{11-10} = vi{3-2};
+    let Inst{9-7} = Zn;
+    let Inst{6-4} = 0b010;
+    let Inst{3-2} = vi{1-0};
+    let Inst{1-0} = imm;
+  }
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s2range, ZZZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat<NAME, intrinsic, uimm2s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s2>;
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$vimm",
+                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm,
+                  ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$vimm), 0>;
+}
+
+multiclass sme2_mla_long_array_index_za32<string mnemonic, bits<2> op0, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
+  def _HtoS : sme2_mla_long_array_index_base<op0, MatrixOp32, uimm3s2range, ZPR16, ZPR4b16,
+                                             VectorIndexH32b_timm, mnemonic>, SMEPseudo2Instr<NAME # _HtoS, 1> {
     bits<3> vi;
     bits<5> Zn;
     bits<3> imm;
-    let Inst{15} = i3{2};
-    let Inst{11-10} = i3{1-0};
+    let Inst{15} = vi{2};
+    let Inst{12} = 0b1;
+    let Inst{11-10} = vi{1-0};
     let Inst{9-5} = Zn;
+    let Inst{4-3} = op;
     let Inst{2-0} = imm;
   }
 
@@ -2010,22 +2080,24 @@ multiclass sme2_mla_long_array_index<string mnemonic, bits<2> op0, bits<2> op, V
 
   def : SME2_ZA_TwoOp_Multi_Index_Pat<NAME # _HtoS, intrinsic, uimm3s2range, ZPR4b16, zpr_ty, VectorIndexH32b_timm, tileslicerange3s2>;
 }
 
-class sme2_mla_long_array_vg2_index<string mnemonic, bits<2> op0, bits<2> op>
-    : sme2_mla_long_array_index_base<op0, op, uimm2s2range, ZZ_h_mul_r,
-                                     mnemonic, "vgx2"> {
-  bits<3> i3;
+class sme2_mla_long_array_vg2_index_za32<string mnemonic, bits<2> op0, bits<2> op>
+    : sme2_mla_long_array_index_base<op0, MatrixOp32, uimm2s2range, ZZ_h_mul_r, ZPR4b16,
+                                     VectorIndexH32b_timm, mnemonic, "vgx2"> {
+  bits<3> vi;
   bits<4> Zn;
   bits<2> imm;
   let Inst{15} = 0b0;
-  let Inst{11-10} = i3{2-1};
+  let Inst{12} = 0b1;
+  let Inst{11-10} = vi{2-1};
   let Inst{9-6} = Zn;
   let Inst{5} = 0b0;
-  let Inst{2} = i3{0};
+  let Inst{4-3} = op;
+  let Inst{2} = vi{0};
   let Inst{1-0} = imm;
 }
 
-multiclass sme2_fp_mla_long_array_vg2_index<string mnemonic, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
-  def _HtoS : sme2_mla_long_array_vg2_index<mnemonic, 0b10, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
+multiclass sme2_fp_mla_long_array_vg2_index_za32<string mnemonic, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
+  def _HtoS : sme2_mla_long_array_vg2_index_za32<mnemonic, 0b10, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
 
   def _HtoS_PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME # _HtoS, uimm2s2range, ZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm, SMEMatrixArray>;
 
@@ -2036,7 +2108,7 @@ multiclass sme2_fp_mla_long_array_vg2_index<string mnemonic, bits<2> op, ValueTy
 }
 
 multiclass sme2_int_mla_long_array_vg2_index<string mnemonic, bits<2> op, SDPatternOperator intrinsic> {
-  def _S : sme2_mla_long_array_vg2_index<mnemonic, 0b11, op>, SMEPseudo2Instr<NAME # _S, 1>;
+  def _S : sme2_mla_long_array_vg2_index_za32<mnemonic, 0b11, op>, SMEPseudo2Instr<NAME # _S, 1>;
 
   def _S_PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME # _S, uimm2s2range, ZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm, SMEMatrixArray>;
 
@@ -2046,22 +2118,24 @@ multiclass sme2_int_mla_long_array_vg2_index<string mnemonic, bits<2> op, SDPatt
                   (!cast<Instruction>(NAME #_S) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm,
                   ZZ_h_mul_r:$Zn, ZPR4b16:$Zm, VectorIndexH32b_timm:$i3), 0>;
 }
 
-class sme2_mla_long_array_vg4_index<string mnemonic, bits<2> op0, bits<2> op>
-    : sme2_mla_long_array_index_base<op0, op, uimm2s2range, ZZZZ_h_mul_r,
+class sme2_mla_long_array_vg4_index_za32<string mnemonic, bits<2> op0, bits<2> op>
+    : sme2_mla_long_array_index_base<op0, MatrixOp32, uimm2s2range, ZZZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm,
                                      mnemonic, "vgx4"> {
-  bits<3> i3;
+  bits<3> vi;
   bits<3> Zn;
   bits<2> imm;
   let Inst{15} = 0b1;
-  let Inst{11-10} = i3{2-1};
+  let Inst{12} = 0b1;
+  let Inst{11-10} = vi{2-1};
   let Inst{9-7} = Zn;
   let Inst{6-5} = 0b00;
-  let Inst{2} = i3{0};
+  let Inst{4-3} = op;
+  let Inst{2} = vi{0};
   let Inst{1-0} = imm;
 }
 
-multiclass sme2_fp_mla_long_array_vg4_index<string mnemonic, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
-  def _HtoS : sme2_mla_long_array_vg4_index<mnemonic, 0b10, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
+multiclass sme2_fp_mla_long_array_vg4_index_za32<string mnemonic, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
+  def _HtoS : sme2_mla_long_array_vg4_index_za32<mnemonic, 0b10, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
 
   def _HtoS_PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME # _HtoS, uimm2s2range, ZZZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm, SMEMatrixArray>;
 
@@ -2072,7 +2146,7 @@ multiclass sme2_fp_mla_long_array_vg4_index<string mnemonic, bits<2> op, ValueTy
 }
 
 multiclass sme2_int_mla_long_array_vg4_index<string mnemonic, bits<2> op, SDPatternOperator intrinsic> {
-  def _HtoS : sme2_mla_long_array_vg4_index<mnemonic, 0b11, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
+  def _HtoS : sme2_mla_long_array_vg4_index_za32<mnemonic, 0b11, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
 
   def _HtoS_PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME # _HtoS, uimm2s2range, ZZZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm, SMEMatrixArray>;
 
@@ -2880,82 +2954,7 @@ multiclass sme2_multi_vec_array_vg4_index_64b<string mnemonic, bits<3> op,
                   multi_vector_ty:$Zn, vector_ty:$Zm, VectorIndexD32b_timm:$i1), 0>;
 }
 
-// FMLAL (multiple and indexed vector, FP8 to FP16)
-class sme2_multi_vec_array_vg24_index_16b<bits<2> sz, bit vg4, bits<3> op,
-                                          RegisterOperand multi_vector_ty, string mnemonic>
-    : I<(outs MatrixOp16:$ZAda),
-        (ins MatrixOp16:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm2,
-             multi_vector_ty:$Zn, ZPR4b8:$Zm, VectorIndexB:$i),
-        mnemonic, "\t$ZAda[$Rv, $imm2, " # !if(vg4, "vgx4", "vgx2") # "], $Zn, $Zm$i",
-        "", []>, Sched<[]> {
-  bits<4> Zm;
-  bits<2> Rv;
-  bits<4> i;
-  bits<2> imm2;
-  let Inst{31-24} = 0b11000001;
-  let Inst{23-22} = sz;
-  let Inst{21-20} = 0b01;
-  let Inst{19-16} = Zm;
-  let Inst{15} = vg4;
-  let Inst{14-13} = Rv;
-  let Inst{12} = op{2};
-  let Inst{11-10} = i{3-2};
-  let Inst{5-4} = op{1-0};
-  let Inst{3-2} = i{1-0};
-  let Inst{1-0} = imm2;
-
-  let Constraints = "$ZAda = $_ZAda";
-}
-
-multiclass sme2_multi_vec_array_vg2_index_16b<string mnemonic, bits<2> sz, bits<3>op> {
-  def NAME : sme2_multi_vec_array_vg24_index_16b<sz, 0b0, op, ZZ_b_mul_r, mnemonic> {
-    bits<4> Zn;
-    let Inst{9-6} = Zn;
-  }
-  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm2], $Zn, $Zm$i",
-                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv,
-                  uimm2s2range:$imm2, ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB:$i), 0>;
-}
-
-multiclass sme2_multi_vec_array_vg4_index_16b<string mnemonic, bits<2>sz, bits<3>op> {
-  def NAME: sme2_multi_vec_array_vg24_index_16b<sz, 0b1, op, ZZZZ_b_mul_r, mnemonic> {
-    bits<3> Zn;
-    let Inst{9-7} = Zn;
-    let Inst{6} = 0b0;
-  }
-  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm2], $Zn, $Zm$i",
-                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv,
-                  uimm2s2range:$imm2, ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB:$i), 0>;
-}
-
 //===----------------------------------------------------------------------===//
-// SME2 multi-vec indexed long long MLA one source 16-bit
-class sme2_mla_ll_array_index_16b<string mnemonic, bits<2> sz,bits<2> op>
-    : I<(outs MatrixOp16:$ZAda),
-        (ins MatrixOp16:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm3s2range:$imm3, ZPR8:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i),
-        mnemonic, "\t$ZAda[$Rv, $imm3], $Zn, $Zm$i",
-        "", []>, Sched<[]> {
-  bits<4> Zm;
-  bits<2> Rv;
-  bits<4> i;
-  bits<5> Zn;
-  bits<3> imm3;
-  let Inst{31-24} = 0b11000001;
-  let Inst{23-22} = sz;
-  let Inst{21-20} = 0b00;
-  let Inst{19-16} = Zm;
-  let Inst{15} = i{3};
-  let Inst{14-13} = Rv;
-  let Inst{12} = op{1};
-  let Inst{11-10} = i{2-1};
-  let Inst{9-5} = Zn;
-  let Inst{4} = op{0};
-  let Inst{3} = i{0};
-  let Inst{2-0} = imm3;
-
-  let Constraints = "$ZAda = $_ZAda";
-}
-
 // SME2 multi-vec indexed long long MLA one source 32-bit
 class sme2_mla_ll_array_index_32b<string mnemonic, bits<2> sz, bits<3> op>
     : I<(outs MatrixOp32:$ZAda),
diff --git a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-mla.ll b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-mla.ll
new file mode 100644
index 00000000000000..b344fd72b3714b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-mla.ll
@@ -0,0 +1,116 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "// kill:" --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme-f8f16,+sme-f8f32 -force-streaming < %s | FileCheck %s
+
+; FMLAL (indexed)
+
+define void @test_fmlal_vg2x1(i32 %slice, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlal_vg2x1:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlal za.h[w8, 0:1], z0.b, z1.b[0]
+; CHECK: fmlal za.h[w8, 14:15], z0.b, z1.b[15]
+; CHECK: ret
+  call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 %slice,
+                                                        <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm,
+                                                        i32 0)
+  %add = add i32 %slice, 14
+  call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 %add,
+                                                        <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm,
+                                                        i32 15)
+  ret void
+}
+
+define void @test_fmlal_vg2x2(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlal_vg2x2:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlal za.h[w8, 0:1, vgx2], { z0.b, z1.b }, z2.b[0]
+; CHECK: fmlal za.h[w8, 6:7, vgx2], { z0.b, z1.b }, z2.b[15]
+; CHECK: ret
+  call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x2(i32 %slice,
+                                                        <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1,
+                                                        <vscale x 16 x i8> %zm,
+                                                        i32 0)
+  %add = add i32 %slice, 6
+  call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x2(i32 %add,
+                                                        <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1,
+                                                        <vscale x 16 x i8> %zm,
+                                                        i32 15)
+  ret void
+}
+
+define void @test_fmlal_vg2x4(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlal_vg2x4:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlal za.h[w8, 0:1, vgx4], { z0.b - z3.b }, z4.b[0]
+; CHECK: fmlal za.h[w8, 6:7, vgx4], { z0.b - z3.b }, z4.b[15]
+; CHECK: ret
+  call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x4(i32 %slice,
+                                                        <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
+                                                        <vscale x 16 x i8> %zm,
+                                                        i32 0)
+  %add = add i32 %slice, 6
+  call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x4(i32 %add,
+                                                        <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
+                                                        <vscale x 16 x i8> %zm,
+                                                        i32 15)
+  ret void
+}
+
+; FMLALL (indexed)
+
+define void @test_fmlall_vg4x1(i32 %slice, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlall_vg4x1:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlall za.s[w8, 0:3], z0.b, z1.b[0]
+; CHECK: fmlall za.s[w8, 12:15], z0.b, z1.b[15]
+; CHECK: ret
+  call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x1(i32 %slice,
+                                                         <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm,
+                                                         i32 0)
+  %add = add i32 %slice, 12
+  call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x1(i32 %add,
+                                                         <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm,
+                                                         i32 15)
+  ret void
+}
+
+define void @test_fmlall_vg4x2(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlall_vg4x2:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlall za.s[w8, 0:3, vgx2], { z0.b, z1.b }, z2.b[0]
+; CHECK: fmlall za.s[w8, 4:7, vgx2], { z0.b, z1.b }, z2.b[15]
+; CHECK: ret
+  call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x2(i32 %slice,
+                                                         <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1,
+                                                         <vscale x 16 x i8> %zm,
+                                                         i32 0)
+  %add = add i32 %slice, 4
+  call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x2(i32 %add,
+                                                         <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1,
+                                                         <vscale x 16 x i8> %zm,
+                                                         i32 15)
+  ret void
+}
+
+define void @test_fmlall_vg4x4(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_fmlall_vg4x4:
+; CHECK: // %bb.0:
+; CHECK: mov w8, w0
+; CHECK: fmlall za.s[w8, 0:3, vgx4], { z0.b - z3.b }, z4.b[8]
+; CHECK: fmlall za.s[w8, 4:7, vgx4], { z0.b - z3.b }, z4.b[15]
+; CHECK: ret
+  call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x4(i32 %slice,
+                                                         <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
+                                                         <vscale x 16 x i8> %zm,
+                                                         i32 8)
+  %add = add i32 %slice, 4
+  call void @llvm.aarch64.sme.fp8.fmlall.lane.za32.vg4x4(i32 %add,
+                                                         <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
+                                                         <vscale x 16 x i8> %zm,
+                                                         i32 15)
+  ret void
+}

>From 4c6f8a12245ef6c83d77044ad6cb5ad7c92953e9 Mon Sep 17 00:00:00 2001
From: Spencer Abson <spencer.ab...@arm.com>
Date: Thu, 5 Dec 2024 17:54:32 +0000
Subject: [PATCH 3/3] [NFC] Revert refactoring

---
 .../lib/Target/AArch64/AArch64SMEInstrInfo.td |  45 ++-
 llvm/lib/Target/AArch64/SMEInstrFormats.td    | 275 +++++++++++-------
 2 files changed, 192 insertions(+), 128 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index e189cbb789b288..e50e0b9dabc613 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -339,43 +339,43 @@ defm SQDMULH_VG4_4ZZ : sme2_int_sve_destructive_vector_vg4_single<"sqdmulh", 0b1
 defm SQDMULH_VG2_2Z2Z : sme2_int_sve_destructive_vector_vg2_multi<"sqdmulh", 0b1000000>;
 defm SQDMULH_VG4_4Z4Z : sme2_int_sve_destructive_vector_vg4_multi<"sqdmulh", 0b1000000>;
 
-defm FMLAL_MZZI : sme2_mla_long_array_index_za32<"fmlal", 0b10, 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x1>;
-defm FMLAL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index_za32<"fmlal", 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x2>;
-defm FMLAL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index_za32<"fmlal", 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x4>;
+defm FMLAL_MZZI : sme2_mla_long_array_index<"fmlal", 0b10, 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x1>;
+defm FMLAL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index<"fmlal", 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x2>;
+defm FMLAL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index<"fmlal", 0b00, nxv8f16, int_aarch64_sme_fmlal_lane_vg2x4>;
 defm FMLAL_MZZ : sme2_mla_long_array_single<"fmlal", 0b00, 0b00, nxv8f16, int_aarch64_sme_fmlal_single_vg2x1>;
 defm FMLAL_VG2_M2ZZ_HtoS : sme2_fp_mla_long_array_vg2_single<"fmlal", 0b000, MatrixOp32, ZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fmlal_single_vg2x2>;
 defm FMLAL_VG4_M4ZZ_HtoS : sme2_fp_mla_long_array_vg4_single<"fmlal", 0b000, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fmlal_single_vg2x4>;
 defm FMLAL_VG2_M2Z2Z_HtoS : sme2_fp_mla_long_array_vg2_multi<"fmlal", 0b000, MatrixOp32, ZZ_h_mul_r, nxv8f16, int_aarch64_sme_fmlal_vg2x2>;
 defm FMLAL_VG4_M4Z4Z_HtoS : sme2_fp_mla_long_array_vg4_multi<"fmlal", 0b000, MatrixOp32, ZZZZ_h_mul_r, nxv8f16, int_aarch64_sme_fmlal_vg2x4>;
 
-defm FMLSL_MZZI : sme2_mla_long_array_index_za32<"fmlsl", 0b10, 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x1>;
-defm FMLSL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index_za32<"fmlsl", 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x2>;
-defm FMLSL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index_za32<"fmlsl", 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x4>;
+defm FMLSL_MZZI : sme2_mla_long_array_index<"fmlsl", 0b10, 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x1>;
+defm FMLSL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index<"fmlsl", 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x2>;
+defm FMLSL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index<"fmlsl", 0b01, nxv8f16, int_aarch64_sme_fmlsl_lane_vg2x4>;
 defm FMLSL_MZZ : sme2_mla_long_array_single<"fmlsl", 0b00, 0b01, nxv8f16, int_aarch64_sme_fmlsl_single_vg2x1>;
 defm FMLSL_VG2_M2ZZ_HtoS : sme2_fp_mla_long_array_vg2_single<"fmlsl", 0b010, MatrixOp32, ZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fmlsl_single_vg2x2>;
 defm FMLSL_VG4_M4ZZ_HtoS : sme2_fp_mla_long_array_vg4_single<"fmlsl", 0b010, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fmlsl_single_vg2x4>;
 defm FMLSL_VG2_M2Z2Z_HtoS : sme2_fp_mla_long_array_vg2_multi<"fmlsl", 0b001, MatrixOp32, ZZ_h_mul_r, nxv8f16, int_aarch64_sme_fmlsl_vg2x2>;
 defm FMLSL_VG4_M4Z4Z_HtoS : sme2_fp_mla_long_array_vg4_multi<"fmlsl", 0b001, MatrixOp32, ZZZZ_h_mul_r, nxv8f16, int_aarch64_sme_fmlsl_vg2x4>;
 
-defm BFMLAL_MZZI : sme2_mla_long_array_index_za32<"bfmlal", 0b10, 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x1>;
-defm BFMLAL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index_za32<"bfmlal", 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x2>;
-defm BFMLAL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index_za32<"bfmlal", 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x4>;
+defm BFMLAL_MZZI : sme2_mla_long_array_index<"bfmlal", 0b10, 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x1>;
+defm BFMLAL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index<"bfmlal", 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x2>;
+defm BFMLAL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index<"bfmlal", 0b10, nxv8bf16, int_aarch64_sme_fmlal_lane_vg2x4>;
 defm BFMLAL_MZZ : sme2_mla_long_array_single<"bfmlal", 0b00, 0b10, nxv8bf16, int_aarch64_sme_fmlal_single_vg2x1>;
 defm BFMLAL_VG2_M2ZZ_HtoS : sme2_fp_mla_long_array_vg2_single<"bfmlal", 0b100, MatrixOp32, ZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fmlal_single_vg2x2>;
 defm BFMLAL_VG4_M4ZZ_HtoS : sme2_fp_mla_long_array_vg4_single<"bfmlal", 0b100, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fmlal_single_vg2x4>;
 defm BFMLAL_VG2_M2Z2Z_HtoS : sme2_fp_mla_long_array_vg2_multi<"bfmlal", 0b010, MatrixOp32, ZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmlal_vg2x2>;
 defm BFMLAL_VG4_M4Z4Z_HtoS : sme2_fp_mla_long_array_vg4_multi<"bfmlal", 0b010, MatrixOp32, ZZZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmlal_vg2x4>;
 
-defm BFMLSL_MZZI : sme2_mla_long_array_index_za32<"bfmlsl", 0b10, 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x1>;
-defm BFMLSL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index_za32<"bfmlsl", 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x2>;
-defm BFMLSL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index_za32<"bfmlsl", 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x4>;
+defm BFMLSL_MZZI : sme2_mla_long_array_index<"bfmlsl", 0b10, 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x1>;
+defm BFMLSL_VG2_M2ZZI : sme2_fp_mla_long_array_vg2_index<"bfmlsl", 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x2>;
+defm BFMLSL_VG4_M4ZZI : sme2_fp_mla_long_array_vg4_index<"bfmlsl", 0b11, nxv8bf16, int_aarch64_sme_fmlsl_lane_vg2x4>;
 defm BFMLSL_MZZ : sme2_mla_long_array_single<"bfmlsl", 0b00, 0b11, nxv8bf16, int_aarch64_sme_fmlsl_single_vg2x1>;
 defm BFMLSL_VG2_M2ZZ_HtoS : sme2_fp_mla_long_array_vg2_single<"bfmlsl", 0b110, MatrixOp32, ZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fmlsl_single_vg2x2>;
 defm BFMLSL_VG4_M4ZZ_HtoS : sme2_fp_mla_long_array_vg4_single<"bfmlsl", 0b110, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fmlsl_single_vg2x4>;
 defm BFMLSL_VG2_M2Z2Z_HtoS : sme2_fp_mla_long_array_vg2_multi<"bfmlsl", 0b011, MatrixOp32, ZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmlsl_vg2x2>;
 defm BFMLSL_VG4_M4Z4Z_HtoS : sme2_fp_mla_long_array_vg4_multi<"bfmlsl", 0b011, MatrixOp32, ZZZZ_h_mul_r, nxv8bf16, int_aarch64_sme_fmlsl_vg2x4>;
 
-defm SMLAL_MZZI : sme2_mla_long_array_index_za32<"smlal", 0b11, 0b00, nxv8i16, int_aarch64_sme_smlal_lane_vg2x1>;
+defm SMLAL_MZZI : sme2_mla_long_array_index<"smlal", 0b11, 0b00, nxv8i16, int_aarch64_sme_smlal_lane_vg2x1>;
 defm SMLAL_VG2_M2ZZI : sme2_int_mla_long_array_vg2_index<"smlal", 0b00, int_aarch64_sme_smlal_lane_vg2x2>;
 defm SMLAL_VG4_M4ZZI : sme2_int_mla_long_array_vg4_index<"smlal", 0b00, int_aarch64_sme_smlal_lane_vg2x4>;
 defm SMLAL_MZZ : sme2_mla_long_array_single<"smlal",0b01, 0b00, nxv8i16, int_aarch64_sme_smlal_single_vg2x1>;
@@ -384,7 +384,7 @@ defm SMLAL_VG4_M4ZZ : sme2_int_mla_long_array_vg4_single<"smlal", 0b00, int_aar
 defm SMLAL_VG2_M2Z2Z : sme2_int_mla_long_array_vg2_multi<"smlal", 0b00, int_aarch64_sme_smlal_vg2x2>;
 defm SMLAL_VG4_M4Z4Z : sme2_int_mla_long_array_vg4_multi<"smlal", 0b00, int_aarch64_sme_smlal_vg2x4>;
 
-defm SMLSL_MZZI : sme2_mla_long_array_index_za32<"smlsl", 0b11, 0b01, nxv8i16, int_aarch64_sme_smlsl_lane_vg2x1>;
+defm SMLSL_MZZI : sme2_mla_long_array_index<"smlsl", 0b11, 0b01, nxv8i16, int_aarch64_sme_smlsl_lane_vg2x1>;
 defm SMLSL_VG2_M2ZZI : sme2_int_mla_long_array_vg2_index<"smlsl", 0b01, int_aarch64_sme_smlsl_lane_vg2x2>;
 defm SMLSL_VG4_M4ZZI : sme2_int_mla_long_array_vg4_index<"smlsl", 0b01, int_aarch64_sme_smlsl_lane_vg2x4>;
 defm SMLSL_MZZ : sme2_mla_long_array_single<"smlsl",0b01, 0b01, nxv8i16, int_aarch64_sme_smlsl_single_vg2x1>;
@@ -393,7 +393,7 @@ defm SMLSL_VG4_M4ZZ : sme2_int_mla_long_array_vg4_single<"smlsl", 0b01, int_aar
 defm SMLSL_VG2_M2Z2Z : sme2_int_mla_long_array_vg2_multi<"smlsl", 0b01, int_aarch64_sme_smlsl_vg2x2>;
 defm SMLSL_VG4_M4Z4Z : sme2_int_mla_long_array_vg4_multi<"smlsl", 0b01, int_aarch64_sme_smlsl_vg2x4>;
 
-defm UMLAL_MZZI : sme2_mla_long_array_index_za32<"umlal", 0b11, 0b10, nxv8i16, int_aarch64_sme_umlal_lane_vg2x1>;
+defm UMLAL_MZZI : sme2_mla_long_array_index<"umlal", 0b11, 0b10, nxv8i16, int_aarch64_sme_umlal_lane_vg2x1>;
 defm UMLAL_VG2_M2ZZI : sme2_int_mla_long_array_vg2_index<"umlal", 0b10, int_aarch64_sme_umlal_lane_vg2x2>;
 defm UMLAL_VG4_M4ZZI : sme2_int_mla_long_array_vg4_index<"umlal", 0b10, int_aarch64_sme_umlal_lane_vg2x4>;
 defm UMLAL_MZZ : sme2_mla_long_array_single<"umlal",0b01, 0b10, nxv8i16, int_aarch64_sme_umlal_single_vg2x1>;
@@ -402,7 +402,7 @@ defm UMLAL_VG4_M4ZZ : sme2_int_mla_long_array_vg4_single<"umlal", 0b10, int_aar
 defm UMLAL_VG2_M2Z2Z : sme2_int_mla_long_array_vg2_multi<"umlal", 0b10, int_aarch64_sme_umlal_vg2x2>;
 defm UMLAL_VG4_M4Z4Z : sme2_int_mla_long_array_vg4_multi<"umlal", 0b10, int_aarch64_sme_umlal_vg2x4>;
 
-defm UMLSL_MZZI : sme2_mla_long_array_index_za32<"umlsl", 0b11, 0b11, nxv8i16, int_aarch64_sme_umlsl_lane_vg2x1>;
+defm UMLSL_MZZI : sme2_mla_long_array_index<"umlsl", 0b11, 0b11, nxv8i16, int_aarch64_sme_umlsl_lane_vg2x1>;
 defm UMLSL_VG2_M2ZZI : sme2_int_mla_long_array_vg2_index<"umlsl", 0b11, int_aarch64_sme_umlsl_lane_vg2x2>;
 defm UMLSL_VG4_M4ZZI : sme2_int_mla_long_array_vg4_index<"umlsl", 0b11, int_aarch64_sme_umlsl_lane_vg2x4>;
 defm UMLSL_MZZ : sme2_mla_long_array_single<"umlsl",0b01, 0b11, nxv8i16, int_aarch64_sme_umlsl_single_vg2x1>;
@@ -994,10 +994,9 @@ defm FDOT_VG4_M4ZZ_BtoH : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b01
 defm FDOT_VG2_M2Z2Z_BtoH : sme2_dot_mla_add_sub_array_vg2_multi<"fdot", 0b0100100, MatrixOp16, ZZ_b_mul_r, nxv16i8, null_frag>;
 defm FDOT_VG4_M4Z4Z_BtoH : sme2_dot_mla_add_sub_array_vg4_multi<"fdot", 0b0100100, MatrixOp16, ZZZZ_b_mul_r, nxv16i8, null_frag>;
 
-defm FMLAL_MZZI_BtoH : sme2_mla_long_array_index_za16<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x1>;
-defm FMLAL_VG2_M2ZZI_BtoH : sme2_mla_long_array_vg2_index_za16<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x2>;
-defm FMLAL_VG4_M4ZZI_BtoH : sme2_mla_long_array_vg4_index_za16<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x4>;
-
+defm FMLAL_MZZI_BtoH : sme2_fp8_fmlal_indexed_za16<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x1>;
+defm FMLAL_VG2_M2ZZI_BtoH : sme2_fp8_fmlal_indexed_za16_vgx2<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x2>;
+defm FMLAL_VG4_M4ZZI_BtoH : sme2_fp8_fmlal_indexed_za16_vgx4<"fmlal", int_aarch64_sme_fp8_fmlal_lane_za16_vg2x4>;
 def FMLAL_VG2_MZZ_BtoH : sme2_mla_long_array_single_16b<"fmlal">;
 defm FMLAL_VG2_M2ZZ_BtoH : sme2_fp_mla_long_array_vg2_single<"fmlal", 0b001, MatrixOp16, ZZ_b, ZPR4b8, nxv16i8, null_frag>;
@@ -1021,9 +1020,9 @@ defm FDOT_VG4_M4Z4Z_BtoS : sme2_dot_mla_add_sub_array_vg4_multi<"fdot", 0b0100
 def FVDOTB_VG4_M2ZZI_BtoS : sme2_fp8_multi_vec_array_vg4_index<"fvdotb", 0b0>;
 def FVDOTT_VG4_M2ZZI_BtoS : sme2_fp8_multi_vec_array_vg4_index<"fvdott", 0b1>;
 
-defm FMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"fmlall", 0b01, 0b000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1>;
-defm FMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"fmlall", 0b10, 0b100, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2>;
-defm FMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"fmlall", 0b00, 0b1000, int_aarch64_sme_fp8_fmlall_lane_za32_vg4x4>;
+defm FMLALL_MZZI_BtoS : sme2_fp8_fmlall_indexed_za32<"fmlall", int_aarch64_sme_fp8_fmlall_lane_za32_vg4x1>;
+defm FMLALL_VG2_M2ZZI_BtoS : sme2_fp8_fmlall_indexed_za32_vgx2<"fmlall", int_aarch64_sme_fp8_fmlall_lane_za32_vg4x2>;
+defm FMLALL_VG4_M4ZZI_BtoS : sme2_fp8_fmlall_indexed_za32_vgx4<"fmlall", int_aarch64_sme_fp8_fmlall_lane_za32_vg4x4>;
 
 defm FMLALL_MZZ_BtoS : sme2_mla_ll_array_single<"fmlall", 0b01000, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, null_frag>;
 defm FMLALL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg24_single<"fmlall", 0b000001, MatrixOp32, ZZ_b, ZPR4b8>;
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 7723caf78500a4..79abc31d29a2cf 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -1972,12 +1972,12 @@ multiclass sme2p1_bf_max_min_vector_vg4_multi<string mnemonic, bits<7>op> {
 
 //===----------------------------------------------------------------------===//
 // SME2 Multi-vector - Index/Single/Multi Array Vectors FMA sources
 
-class sme2_mla_long_array_index_base<bits<2> op0, MatrixOperand mat_ty, Operand slice_ty,
-                                     RegisterOperand multi_vector_ty, RegisterOperand vector_ty,
-                                     Operand index_ty, string mnemonic, string vg_acronym="">
-    : I<(outs mat_ty:$ZAda),
-        (ins mat_ty:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, slice_ty:$imm, multi_vector_ty:$Zn, vector_ty:$Zm, index_ty:$vi),
-        mnemonic, "\t$ZAda[$Rv, $imm" # !if(!eq(vg_acronym, ""), "", ", " # vg_acronym) # "], $Zn, $Zm$vi",
+class sme2_mla_long_array_index_base<bits<2> op0, bits<2> op, Operand index_ty,
+                                     RegisterOperand multi_vector_ty,
+                                     string mnemonic, string vg_acronym="">
+    : I<(outs MatrixOp32:$ZAda),
+        (ins MatrixOp32:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, index_ty:$imm, multi_vector_ty:$Zn, ZPR4b16:$Zm, VectorIndexH32b_timm:$i3),
+        mnemonic, "\t$ZAda[$Rv, $imm" # !if(!eq(vg_acronym, ""), "", ", " # vg_acronym) # "], $Zn, $Zm$i3",
         "", []>, Sched<[]> {
   bits<4> Zm;
   bits<2> Rv;
@@ -1987,91 +1987,21 @@ class sme2_mla_long_array_index_base<bits<2> op0, MatrixOperand mat_ty, Operand
   let Inst{20} = !if(!eq(vg_acronym, ""), 0, 1);
   let Inst{19-16} = Zm;
   let Inst{14-13} = Rv;
+  let Inst{12} = 0b1;
+  let Inst{4-3} = op;
 
   let Constraints = "$ZAda = $_ZAda";
 }
 
-multiclass sme2_mla_long_array_index_za16<string mnemonic, SDPatternOperator intrinsic> {
-  def : sme2_mla_long_array_index_base<0b11, MatrixOp16, uimm3s2range, ZPR8, ZPR4b8, VectorIndexB32b_timm,
-                                       mnemonic>, SMEPseudo2Instr<NAME, 1> {
-    bits<4> vi;
-    bits<3> imm;
-    bits<5> Zn;
-    let Inst{15} = vi{3};
-    let Inst{12} = 0b0;
-    let Inst{11-10} = vi{2-1};
-    let Inst{9-5} = Zn;
-    let Inst{4} = 0b0;
-    let Inst{3} = vi{0};
-    let Inst{2-0} = imm;
-  }
-
-  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm3s2range, ZPR8, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
-
-  def : SME2_ZA_TwoOp_Multi_Index_Pat<NAME, intrinsic, uimm3s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange3s2>;
-}
-
-multiclass sme2_mla_long_array_vg2_index_za16<string mnemonic, SDPatternOperator intrinsic> {
-
-  def NAME : sme2_mla_long_array_index_base<0b10, MatrixOp16, uimm2s2range, ZZ_b_mul_r, ZPR4b8,
-                                            VectorIndexB32b_timm, mnemonic, "vgx2">, SMEPseudo2Instr<NAME, 1> {
-    bits<4> vi;
-    bits<2> imm;
-    bits<4> Zn;
-    let Inst{15} = 0b0;
-    let Inst{12} = 0b1;
-    let Inst{11-10} = vi{3-2};
-    let Inst{9-6} = Zn;
-    let Inst{5-4} = 0b11;
-    let Inst{3-2} = vi{1-0};
-    let Inst{1-0} = imm;
-  }
-
-  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s2range, ZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
-
-  def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat<NAME, intrinsic, uimm2s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s2>;
-
-  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$vimm",
-                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm,
-                  ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$vimm), 0>;
-}
-
-multiclass sme2_mla_long_array_vg4_index_za16<string mnemonic, SDPatternOperator intrinsic> {
-
-  def NAME : sme2_mla_long_array_index_base<0b10, MatrixOp16, uimm2s2range, ZZZZ_b_mul_r, ZPR4b8,
-                                            VectorIndexB32b_timm, mnemonic, "vgx4">, SMEPseudo2Instr<NAME, 1> {
-    bits<4> vi;
-    bits<2> imm;
-    bits<3> Zn;
-    let Inst{15} = 0b1;
-    let Inst{12} = 0b1;
-    let Inst{11-10} = vi{3-2};
-    let Inst{9-7} = Zn;
-    let Inst{6-4} = 0b010;
-    let Inst{3-2} = vi{1-0};
-    let Inst{1-0} = imm;
-  }
-
-  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s2range, ZZZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
-
-  def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat<NAME, intrinsic, uimm2s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s2>;
-
-  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$vimm",
-                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm,
-                  ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$vimm), 0>;
-}
-
-multiclass sme2_mla_long_array_index_za32<string mnemonic, bits<2> op0, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
-  def _HtoS : sme2_mla_long_array_index_base<op0, MatrixOp32, uimm3s2range, ZPR16, ZPR4b16,
-                                             VectorIndexH32b_timm, mnemonic>, SMEPseudo2Instr<NAME # _HtoS, 1> {
-    bits<3> vi;
+multiclass sme2_mla_long_array_index<string mnemonic, bits<2> op0, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
+  def _HtoS : sme2_mla_long_array_index_base<op0, op, uimm3s2range, ZPR16,
+                                             mnemonic>, SMEPseudo2Instr<NAME # _HtoS, 1> {
+    bits<3> i3;
     bits<5> Zn;
     bits<3> imm;
-    let Inst{15} = vi{2};
-    let Inst{12} = 0b1;
-    let Inst{11-10} = vi{1-0};
+    let Inst{15} = i3{2};
+    let Inst{11-10} = i3{1-0};
     let Inst{9-5} = Zn;
-    let Inst{4-3} = op;
     let Inst{2-0} = imm;
   }
 
@@ -2080,24 +2010,22 @@ multiclass sme2_mla_long_array_index_za32<string mnemonic, bits<2>
 
   def : SME2_ZA_TwoOp_Multi_Index_Pat<NAME # _HtoS, intrinsic, uimm3s2range, ZPR4b16, zpr_ty, VectorIndexH32b_timm, tileslicerange3s2>;
 }
 
-class sme2_mla_long_array_vg2_index_za32<string mnemonic, bits<2> op0, bits<2> op>
-    : sme2_mla_long_array_index_base<op0, MatrixOp32, uimm2s2range, ZZ_h_mul_r, ZPR4b16,
-                                     VectorIndexH32b_timm, mnemonic, "vgx2"> {
-  bits<3> vi;
+class sme2_mla_long_array_vg2_index<string mnemonic, bits<2> op0, bits<2> op>
+    : sme2_mla_long_array_index_base<op0, op, uimm2s2range, ZZ_h_mul_r,
+                                     mnemonic, "vgx2"> {
+  bits<3> i3;
   bits<4> Zn;
   bits<2> imm;
   let Inst{15} = 0b0;
-  let Inst{12} = 0b1;
-  let Inst{11-10} = vi{2-1};
+  let Inst{11-10} = i3{2-1};
   let Inst{9-6} = Zn;
   let Inst{5} = 0b0;
-  let Inst{4-3} = op;
-  let Inst{2} = vi{0};
+  let Inst{2} = i3{0};
   let Inst{1-0} = imm;
 }
 
-multiclass sme2_fp_mla_long_array_vg2_index_za32<string mnemonic, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
-  def _HtoS : sme2_mla_long_array_vg2_index_za32<mnemonic, 0b10, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
+multiclass sme2_fp_mla_long_array_vg2_index<string mnemonic, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
+  def _HtoS : sme2_mla_long_array_vg2_index<mnemonic, 0b10, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
 
   def _HtoS_PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME # _HtoS, uimm2s2range, ZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm, SMEMatrixArray>;
 
@@ -2108,7 +2036,7 @@ multiclass sme2_fp_mla_long_array_vg2_index_za32<string mnemonic, bits<2> op, Va
 }
 
 multiclass sme2_int_mla_long_array_vg2_index<string mnemonic, bits<2> op, SDPatternOperator intrinsic> {
-  def _S : sme2_mla_long_array_vg2_index_za32<mnemonic, 0b11, op>, SMEPseudo2Instr<NAME # _S, 1>;
+  def _S : sme2_mla_long_array_vg2_index<mnemonic, 0b11, op>, SMEPseudo2Instr<NAME # _S, 1>;
 
   def _S_PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME # _S, uimm2s2range, ZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm, SMEMatrixArray>;
 
@@ -2118,24 +2046,22 @@ multiclass sme2_int_mla_long_array_vg2_index<string mnemonic, bits<2> op, SDPatt
                   (!cast<Instruction>(NAME #_S) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm,
                   ZZ_h_mul_r:$Zn, ZPR4b16:$Zm, VectorIndexH32b_timm:$i3), 0>;
 }
 
-class sme2_mla_long_array_vg4_index_za32<string mnemonic, bits<2> op0, bits<2> op>
-    : sme2_mla_long_array_index_base<op0, MatrixOp32, uimm2s2range, ZZZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm,
+class sme2_mla_long_array_vg4_index<string mnemonic, bits<2> op0, bits<2> op>
+    : sme2_mla_long_array_index_base<op0, op, uimm2s2range, ZZZZ_h_mul_r,
                                      mnemonic, "vgx4"> {
-  bits<3> vi;
+  bits<3> i3;
   bits<3> Zn;
   bits<2> imm;
   let Inst{15} = 0b1;
-  let Inst{12} = 0b1;
-  let Inst{11-10} = vi{2-1};
+  let Inst{11-10} = i3{2-1};
   let Inst{9-7} = Zn;
   let Inst{6-5} = 0b00;
-  let Inst{4-3} = op;
-  let Inst{2} = vi{0};
+  let Inst{2} = i3{0};
   let Inst{1-0} = imm;
 }
 
-multiclass sme2_fp_mla_long_array_vg4_index_za32<string mnemonic, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
-  def _HtoS : sme2_mla_long_array_vg4_index_za32<mnemonic, 0b10, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
+multiclass sme2_fp_mla_long_array_vg4_index<string mnemonic, bits<2> op, ValueType zpr_ty, SDPatternOperator intrinsic> {
+  def _HtoS : sme2_mla_long_array_vg4_index<mnemonic, 0b10, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
 
   def _HtoS_PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME # _HtoS, uimm2s2range, ZZZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm, SMEMatrixArray>;
 
@@ -2146,7 +2072,7 @@ multiclass sme2_fp_mla_long_array_vg4_index_za32<string mnemonic, bits<2> op, Va
 }
 
 multiclass sme2_int_mla_long_array_vg4_index<string mnemonic, bits<2> op, SDPatternOperator intrinsic> {
-  def _HtoS : sme2_mla_long_array_vg4_index_za32<mnemonic, 0b11, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
+  def _HtoS : sme2_mla_long_array_vg4_index<mnemonic, 0b11, op>, SMEPseudo2Instr<NAME # _HtoS, 1>;
 
   def _HtoS_PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME # _HtoS, uimm2s2range, ZZZZ_h_mul_r, ZPR4b16, VectorIndexH32b_timm, SMEMatrixArray>;
 
@@ -2954,7 +2880,102 @@ multiclass sme2_multi_vec_array_vg4_index_64b<string mnemonic, bits<3> op,
                   multi_vector_ty:$Zn, vector_ty:$Zm, VectorIndexD32b_timm:$i1), 0>;
 }
 
+// FMLAL (multiple and indexed vector, FP8 to FP16)
+class sme2_multi_vec_array_vg24_index_16b<bits<2> sz, bit vg4, bits<3> op,
+                                          RegisterOperand multi_vector_ty, string mnemonic>
+    : I<(outs MatrixOp16:$ZAda),
+        (ins MatrixOp16:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm2,
+             multi_vector_ty:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i),
+        mnemonic, "\t$ZAda[$Rv, $imm2, " # !if(vg4, "vgx4", "vgx2") # "], $Zn, $Zm$i",
+        "", []>, Sched<[]> {
+  bits<4> Zm;
+  bits<2> Rv;
+  bits<4> i;
+  bits<2> imm2;
+  let Inst{31-24} = 0b11000001;
+  let Inst{23-22} = sz;
+  let Inst{21-20} = 0b01;
+  let Inst{19-16} = Zm;
+  let Inst{15} = vg4;
+  let Inst{14-13} = Rv;
+  let Inst{12} = op{2};
+  let Inst{11-10} = i{3-2};
+  let Inst{5-4} = op{1-0};
+  let Inst{3-2} = i{1-0};
+  let Inst{1-0} = imm2;
+
+  let Uses = [FPMR, FPCR];
+  let Constraints = "$ZAda = $_ZAda";
+}
+
+multiclass sme2_fp8_fmlal_indexed_za16_vgx2<string mnemonic, SDPatternOperator intrinsic> {
+  def NAME : sme2_multi_vec_array_vg24_index_16b<0b10, 0b0, 0b111, ZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
+    bits<4> Zn;
+    let Inst{9-6} = Zn;
+  }
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s2range, ZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat<NAME, intrinsic, uimm2s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s2>;
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm2], $Zn, $Zm$i",
+                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm2,
+                  ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
+}
+
+multiclass sme2_fp8_fmlal_indexed_za16_vgx4<string mnemonic, SDPatternOperator intrinsic> {
+  def NAME: sme2_multi_vec_array_vg24_index_16b<0b10, 0b1, 0b110, ZZZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
+    bits<3> Zn;
+    let Inst{9-7} = Zn;
+    let Inst{6} = 0b0;
+  }
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s2range, ZZZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat<NAME, intrinsic, uimm2s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s2>;
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$i",
+                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s2range:$imm,
+                  ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
+}
+
 //===----------------------------------------------------------------------===//
+// FMLAL (single and indexed vector, FP8 to FP16)
+class sme2_mla_long_array_index_16b<string mnemonic, bits<2> sz,bits<2> op>
+    : I<(outs MatrixOp16:$ZAda),
+        (ins MatrixOp16:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm3s2range:$imm3, ZPR8:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i),
+        mnemonic, "\t$ZAda[$Rv, $imm3], $Zn, $Zm$i",
+        "", []>, Sched<[]> {
+  bits<4> Zm;
+  bits<2> Rv;
+  bits<4> i;
+  bits<5> Zn;
+  bits<3> imm3;
+  let Inst{31-24} = 0b11000001;
+  let Inst{23-22} = sz;
+  let Inst{21-20} = 0b00;
+  let Inst{19-16} = Zm;
+  let Inst{15} = i{3};
+  let Inst{14-13} = Rv;
+  let Inst{12} = op{1};
+  let Inst{11-10} = i{2-1};
+  let Inst{9-5} = Zn;
+  let Inst{4} = op{0};
+  let Inst{3} = i{0};
+  let Inst{2-0} = imm3;
+
+  let Uses = [FPMR, FPCR];
+  let Constraints = "$ZAda = $_ZAda";
+}
+
+multiclass sme2_fp8_fmlal_indexed_za16<string mnemonic, SDPatternOperator intrinsic> {
+  def NAME : sme2_mla_long_array_index_16b<mnemonic, 0b11, 0b00>, SMEPseudo2Instr<NAME, 1>;
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm3s2range, ZPR8, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_Multi_Index_Pat<NAME, intrinsic, uimm3s2range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange3s2>;
+}
+
 // SME2 multi-vec indexed long long MLA one source 32-bit
 class sme2_mla_ll_array_index_32b<string mnemonic, bits<2> sz, bits<3> op>
     : I<(outs MatrixOp32:$ZAda),
@@ -2988,6 +3009,16 @@ multiclass sme2_mla_ll_array_index_32b<string mnemonic, bits<2> sz, bits<3> op,
 
   def : SME2_ZA_TwoOp_Multi_Index_Pat<NAME, intrinsic, uimm2s4range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s4>;
 }
+multiclass sme2_fp8_fmlall_indexed_za32<string mnemonic, SDPatternOperator intrinsic> {
+  def NAME : sme2_mla_ll_array_index_32b<mnemonic, 0b01, 0b000>, SMEPseudo2Instr<NAME, 1> {
+    let Uses = [FPMR, FPCR];
+  }
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm2s4range, ZPR8, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_Multi_Index_Pat<NAME, intrinsic, uimm2s4range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange2s4>;
+}
+
 // SME2 multi-vec indexed long long MLA one source 64-bit
 
 class sme2_mla_ll_array_index_64b<string mnemonic, bits<2> op>
@@ -3065,6 +3096,22 @@ multiclass sme2_mla_ll_array_vg2_index_32b<string mnemonic, bits<2> sz, bits<3>
                   (!cast<Instruction>(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm,
                   ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
 }
+multiclass sme2_fp8_fmlall_indexed_za32_vgx2<string mnemonic, SDPatternOperator intrinsic> {
+  def NAME : sme2_mla_ll_array_vg24_index_32b<0b10, 0b0, 0b100, ZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
+    bits<4> Zn;
+    let Inst{9-6} = Zn;
+    let Uses = [FPMR, FPCR];
+  }
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm1s4range, ZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat<NAME, intrinsic, uimm1s4range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange1s4>;
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$i",
+                  (!cast<Instruction>(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm,
+                  ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
+}
+
 // SME2 multi-vec indexed long long MLA four sources 32-bit
 
 multiclass sme2_mla_ll_array_vg4_index_32b<string mnemonic, bits<2> sz, bits<4> op,
                                            SDPatternOperator intrinsic> {
@@ -3081,6 +3128,24 @@ multiclass sme2_mla_ll_array_vg4_index_32b<string mnemonic, bits<2> sz, bits<4>
   def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$i",
                   (!cast<Instruction>(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
 }
+
+multiclass sme2_fp8_fmlall_indexed_za32_vgx4<string mnemonic, SDPatternOperator intrinsic> {
+  def NAME : sme2_mla_ll_array_vg24_index_32b<0b00, 0b1, 0b000, ZZZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
+    bits<3> Zn;
+    let Inst{9-7} = Zn;
+    let Inst{6} = 0b1;
+    let Uses = [FPMR, FPCR];
+  }
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, uimm1s4range, ZZZZ_b_mul_r, ZPR4b8, VectorIndexB32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat<NAME, intrinsic, uimm1s4range, ZPR4b8, nxv16i8, VectorIndexB32b_timm, tileslicerange1s4>;
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm], $Zn, $Zm$i",
+                  (!cast<Instruction>(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv,
+                  uimm1s4range:$imm, ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
+}
+
 class sme2_mla_ll_array_vg24_index_64b<bit vg4, bits<2> op,
                                        RegisterOperand vector_ty, string mnemonic>
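
For readers skimming the thread, here is a minimal IR sketch (an editor's illustration, not part of the patch) of how a front end is expected to drive one of the new FP8 intrinsics: the FP8 mode register is programmed via llvm.aarch64.set.fpmr before the widening multiply-accumulate intrinsic executes, matching how the clang builtins handle their trailing fpm operand. The function name and the %fpm value are hypothetical, and streaming mode plus live ZA state are assumed to be arranged by the caller, as the -force-streaming RUN lines do for the tests above.

; Minimal usage sketch (assumed names marked below; see the lead-in).
define void @fmlal_fp8_example(i32 %slice, i64 %fpm, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm) {
  ; Program FPMR first; the instruction reads it implicitly, which is why the
  ; instruction formats above list FPMR in their Uses.
  call void @llvm.aarch64.set.fpmr(i64 %fpm)
  ; ZA.H[%slice, 0:1] accumulates the FP8-to-FP16 products of %zn with lane 0 of %zm.
  call void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32 %slice, <vscale x 16 x i8> %zn, <vscale x 16 x i8> %zm, i32 0)
  ret void
}

declare void @llvm.aarch64.set.fpmr(i64)
declare void @llvm.aarch64.sme.fp8.fmlal.lane.za16.vg2x1(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)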