Author: Jonathan Thackray
Date: 2024-12-13T09:09:36Z
New Revision: 1fd3d1d04e6339fff7ef5b8b172ed4954885dde1
URL: https://github.com/llvm/llvm-project/commit/1fd3d1d04e6339fff7ef5b8b172ed4954885dde1
DIFF: https://github.com/llvm/llvm-project/commit/1fd3d1d04e6339fff7ef5b8b172ed4954885dde1.diff

LOG: [AArch64] Add intrinsics for SME FP8 FDOT LANE instructions (#118492)

Add support for the following SME 8 bit floating-point dot-product intrinsics:

  * void svdot_lane_za16_mf8_vg1x2_fpm(uint32_t slice, svmfloat8x2_t zn, svmfloat8_t zm, uint64_t imm_idx, fpm_t fpm);
  * void svdot_lane_za16_mf8_vg1x4_fpm(uint32_t slice, svmfloat8x4_t zn, svmfloat8_t zm, uint64_t imm_idx, fpm_t fpm);
  * void svdot_lane_za32_mf8_vg1x2_fpm(uint32_t slice, svmfloat8x2_t zn, svmfloat8_t zm, uint64_t imm_idx, fpm_t fpm);
  * void svdot_lane_za32_mf8_vg1x4_fpm(uint32_t slice, svmfloat8x4_t zn, svmfloat8_t zm, uint64_t imm_idx, fpm_t fpm);

---------

Co-authored-by: Momchil Velikov <momchil.veli...@arm.com>
Co-authored-by: Marian Lukac <marian.lu...@arm.com>
Co-authored-by: Caroline Concatto <caroline.conca...@arm.com>
Co-authored-by: SpencerAbson <spencer.ab...@arm.com>

Added:
    clang/test/CodeGen/AArch64/sme2-intrinsics/acle_sme2_fp8_fdot.c
    clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_fp8_fdot.c
    llvm/test/CodeGen/AArch64/sme2-intrinsics-fp8-fdot.ll

Modified:
    clang/include/clang/Basic/arm_sme.td
    clang/include/clang/Basic/arm_sve_sme_incl.td
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
    llvm/lib/Target/AArch64/SMEInstrFormats.td

Removed:


################################################################################
diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td
index 71b2c7cdd04f93..0fae70866cd55e 100644
--- a/clang/include/clang/Basic/arm_sme.td
+++ b/clang/include/clang/Basic/arm_sme.td
@@ -740,6 +740,21 @@ let SMETargetGuard = "sme2" in {
   def SVLUTI4_LANE_ZT_X2 : Inst<"svluti4_lane_zt_{d}_x2", "2.di[i", "cUcsUsiUibhf", MergeNone, "aarch64_sme_luti4_lane_zt_x2", [IsStreaming, IsInZT0], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_3>]>;
 }
 
+//
+// SME2 FP8 instructions
+//
+
+// FDOT
+let SMETargetGuard = "sme-f8f32" in {
+  def SVDOT_LANE_FP8_ZA32_VG1x2 : Inst<"svdot_lane_za32[_mf8]_vg1x2_fpm", "vm2di>", "m", MergeNone, "aarch64_sme_fp8_fdot_lane_za32_vg1x2", [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_3>]>;
+  def SVDOT_LANE_FP8_ZA32_VG1x4 : Inst<"svdot_lane_za32[_mf8]_vg1x4_fpm", "vm4di>", "m", MergeNone, "aarch64_sme_fp8_fdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_3>]>;
+}
+
+let SMETargetGuard = "sme-f8f16" in {
+  def SVDOT_LANE_FP8_ZA16_VG1x2 : Inst<"svdot_lane_za16[_mf8]_vg1x2_fpm", "vm2di>", "m", MergeNone, "aarch64_sme_fp8_fdot_lane_za16_vg1x2", [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_7>]>;
+  def SVDOT_LANE_FP8_ZA16_VG1x4 : Inst<"svdot_lane_za16[_mf8]_vg1x4_fpm", "vm4di>", "m", MergeNone, "aarch64_sme_fp8_fdot_lane_za16_vg1x4", [IsStreaming, IsInOutZA, SetsFPMR, IsOverloadNone], [ImmCheck<3, ImmCheck0_7>]>;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // SME2p1 - FMOPA, FMOPS (non-widening)
 let SMETargetGuard = "sme-b16b16" in {

diff --git a/clang/include/clang/Basic/arm_sve_sme_incl.td b/clang/include/clang/Basic/arm_sve_sme_incl.td
index de10be7bdce0db..e7cc40db7dca6c 100644
--- a/clang/include/clang/Basic/arm_sve_sme_incl.td
+++ b/clang/include/clang/Basic/arm_sve_sme_incl.td
@@ -52,6 +52,7 @@ include "arm_immcheck_incl.td"
 // h: half-float
 // d: double
 // b: bfloat
+// m: mfloat8
 
 // Typespec modifiers
 // ------------------

diff --git a/clang/test/CodeGen/AArch64/sme2-intrinsics/acle_sme2_fp8_fdot.c b/clang/test/CodeGen/AArch64/sme2-intrinsics/acle_sme2_fp8_fdot.c
new file mode 100644
index 00000000000000..74d18c32d5b3ab
--- /dev/null
+++ b/clang/test/CodeGen/AArch64/sme2-intrinsics/acle_sme2_fp8_fdot.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: aarch64-registered-target
+#include <arm_sme.h>
+
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sme2 -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sme2 -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +sme2 -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes mem2reg,instcombine,tailcallelim | FileCheck %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sme -target-feature +sme2 -target-feature +sme-f8f16 -target-feature +sme-f8f32 -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -target-feature +sme2 -target-feature +sme-f8f16 -target-feature +sme-f8f32 -target-feature -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
+#include <arm_sme.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3) A1##A2##A3
+#endif
+
+// CHECK-LABEL: define dso_local void @test_svdot_lane_za32_f8_vg1x2(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fdot.lane.za32.vg1x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 3)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z29test_svdot_lane_za32_f8_vg1x2j13svmfloat8x2_tu13__SVMfloat8_tm(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0:[0-9]+]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fdot.lane.za32.vg1x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 3)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svdot_lane_za32_f8_vg1x2(uint32_t slice, svmfloat8x2_t zn,
+                                   svmfloat8_t zm, fpm_t fpmr)
+    __arm_streaming __arm_inout("za") {
+  SVE_ACLE_FUNC(svdot_lane_za32,_mf8,_vg1x2_fpm)(slice, zn, zm, 3, fpmr);
+}
+
+// CHECK-LABEL: define dso_local void @test_svdot_lane_za32_f8_vg1x4(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fdot.lane.za32.vg1x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 3)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z29test_svdot_lane_za32_f8_vg1x4j13svmfloat8x4_tu13__SVMfloat8_tm(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fdot.lane.za32.vg1x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 3)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svdot_lane_za32_f8_vg1x4(uint32_t slice, svmfloat8x4_t zn,
+                                   svmfloat8_t zm, fpm_t fpmr)
+    __arm_streaming __arm_inout("za") {
+  SVE_ACLE_FUNC(svdot_lane_za32,_mf8,_vg1x4_fpm)(slice, zn, zm, 3, fpmr);
+}
+
+// CHECK-LABEL: define dso_local void @test_svdot_lane_za16_f8_vg1x2(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fdot.lane.za16.vg1x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 3)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z29test_svdot_lane_za16_f8_vg1x2j13svmfloat8x2_tu13__SVMfloat8_tm(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fdot.lane.za16.vg1x2(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZM]], i32 3)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svdot_lane_za16_f8_vg1x2(uint32_t slice, svmfloat8x2_t zn,
+                                   svmfloat8_t zm, fpm_t fpmr)
+    __arm_streaming __arm_inout("za") {
+  SVE_ACLE_FUNC(svdot_lane_za16,_mf8,_vg1x2_fpm)(slice, zn, zm, 3, fpmr);
+}
+
+// CHECK-LABEL: define dso_local void @test_svdot_lane_za16_f8_vg1x4(
+// CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
+// CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fdot.lane.za16.vg1x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 3)
+// CHECK-NEXT: ret void
+//
+// CPP-CHECK-LABEL: define dso_local void @_Z29test_svdot_lane_za16_f8_vg1x4j13svmfloat8x4_tu13__SVMfloat8_tm(
+// CPP-CHECK-SAME: i32 noundef [[SLICE:%.*]], <vscale x 16 x i8> [[ZN_COERCE0:%.*]], <vscale x 16 x i8> [[ZN_COERCE1:%.*]], <vscale x 16 x i8> [[ZN_COERCE2:%.*]], <vscale x 16 x i8> [[ZN_COERCE3:%.*]], <vscale x 16 x i8> [[ZM:%.*]], i64 noundef [[FPMR:%.*]]) #[[ATTR0]] {
+// CPP-CHECK-NEXT: [[ENTRY:.*:]]
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.set.fpmr(i64 [[FPMR]])
+// CPP-CHECK-NEXT: tail call void @llvm.aarch64.sme.fp8.fdot.lane.za16.vg1x4(i32 [[SLICE]], <vscale x 16 x i8> [[ZN_COERCE0]], <vscale x 16 x i8> [[ZN_COERCE1]], <vscale x 16 x i8> [[ZN_COERCE2]], <vscale x 16 x i8> [[ZN_COERCE3]], <vscale x 16 x i8> [[ZM]], i32 3)
+// CPP-CHECK-NEXT: ret void
+//
+void test_svdot_lane_za16_f8_vg1x4(uint32_t slice, svmfloat8x4_t zn,
+                                   svmfloat8_t zm, fpm_t fpmr)
+    __arm_streaming __arm_inout("za") {
+  SVE_ACLE_FUNC(svdot_lane_za16,_mf8,_vg1x4_fpm)(slice, zn, zm, 3, fpmr);
+}

diff --git a/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_fp8_fdot.c b/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_fp8_fdot.c
new file mode 100644
index 00000000000000..975f0b2e3dd853
--- /dev/null
+++ b/clang/test/Sema/aarch64-sme2-intrinsics/acle_sme2_fp8_fdot.c
@@ -0,0 +1,38 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sme2 -verify -emit-llvm -o - %s
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_sme.h>
+
+void test_features(uint32_t slice, svmfloat8_t f8, svmfloat8x2_t f8x2,
+                   svmfloat8x4_t f8x4, uint64_t fpmr) __arm_streaming __arm_inout("za") {
+  // expected-error@+1 {{'svdot_lane_za32_mf8_vg1x2_fpm' needs target feature sme,sme-f8f32}}
+  svdot_lane_za32_mf8_vg1x2_fpm(slice, f8x2, f8, 3, fpmr);
+  // expected-error@+1 {{'svdot_lane_za32_mf8_vg1x4_fpm' needs target feature sme,sme-f8f32}}
+  svdot_lane_za32_mf8_vg1x4_fpm(slice, f8x4, f8, 3, fpmr);
+  // expected-error@+1 {{'svdot_lane_za16_mf8_vg1x2_fpm' needs target feature sme,sme-f8f16}}
+  svdot_lane_za16_mf8_vg1x2_fpm(slice, f8x2, f8, 3, fpmr);
+  // expected-error@+1 {{'svdot_lane_za16_mf8_vg1x4_fpm' needs target feature sme,sme-f8f16}}
+  svdot_lane_za16_mf8_vg1x4_fpm(slice, f8x4, f8, 3, fpmr);
+}
+
+void test_imm(uint32_t slice, svmfloat8_t f8, svmfloat8x2_t f8x2,
+              svmfloat8x4_t f8x4, uint64_t fpmr) __arm_streaming __arm_inout("za") {
+// expected-error@+1{{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  svdot_lane_za32_mf8_vg1x2_fpm(slice, f8x2, f8, -1, fpmr);
+// expected-error@+1{{argument value 18446744073709551615 is outside the valid range [0, 3]}}
+  svdot_lane_za32_mf8_vg1x4_fpm(slice, f8x4, f8, -1, fpmr);
+// expected-error@+1{{argument value 18446744073709551615 is outside the valid range [0, 7]}}
+  svdot_lane_za16_mf8_vg1x2_fpm(slice, f8x2, f8, -1, fpmr);
+// expected-error@+1{{argument value 18446744073709551615 is outside the valid range [0, 7]}}
+  svdot_lane_za16_mf8_vg1x4_fpm(slice, f8x4, f8, -1, fpmr);
+
+// expected-error@+1{{argument value 4 is outside the valid range [0, 3]}}
+  svdot_lane_za32_mf8_vg1x2_fpm(slice, f8x2, f8, 4, fpmr);
+// expected-error@+1{{argument value 4 is outside the valid range [0, 3]}}
+  svdot_lane_za32_mf8_vg1x4_fpm(slice, f8x4, f8, 4, fpmr);
+// expected-error@+1{{argument value 8 is outside the valid range [0, 7]}}
+  svdot_lane_za16_mf8_vg1x2_fpm(slice, f8x2, f8, 8, fpmr);
+// expected-error@+1{{argument value 8 is outside the valid range [0, 7]}}
+  svdot_lane_za16_mf8_vg1x4_fpm(slice, f8x4, f8, 8, fpmr);
+}

diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 0a1bd4c923b9b8..654bc64a30bd89 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -3856,6 +3856,31 @@ def int_aarch64_sve_famin_u : AdvSIMD_Pred2VectorArg_Intrinsic;
 def int_aarch64_neon_famax : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_neon_famin : AdvSIMD_2VectorArg_Intrinsic;
 
+
+// SME FP8 FDOT intrinsics
+let TargetPrefix = "aarch64" in {
+
+class SME2_FP8_FDOT_LANE_VG1x2 :
+  DefaultAttrsIntrinsic<[], [llvm_i32_ty,
+                             llvm_nxv16i8_ty, llvm_nxv16i8_ty,
+                             llvm_nxv16i8_ty,
+                             llvm_i32_ty],
+                        [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<4>>]>;
+
+class SME2_FP8_FDOT_LANE_VG1x4 :
+  DefaultAttrsIntrinsic<[], [llvm_i32_ty,
+                             llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty,
+                             llvm_nxv16i8_ty,
+                             llvm_i32_ty],
+                        [IntrInaccessibleMemOnly, IntrHasSideEffects, ImmArg<ArgIndex<6>>]>;
+
+  def int_aarch64_sme_fp8_fdot_lane_za16_vg1x2 : SME2_FP8_FDOT_LANE_VG1x2;
+  def int_aarch64_sme_fp8_fdot_lane_za16_vg1x4 : SME2_FP8_FDOT_LANE_VG1x4;
+
+  def int_aarch64_sme_fp8_fdot_lane_za32_vg1x2 : SME2_FP8_FDOT_LANE_VG1x2;
+  def int_aarch64_sme_fp8_fdot_lane_za32_vg1x4 : SME2_FP8_FDOT_LANE_VG1x4;
+}
+
 //
 // FP8 Intrinsics
 //

diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index bd36e21d1be46c..fa577cf92e99d1 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -986,8 +986,8 @@ def LUTI4_S_4ZZT2Z : sme2_luti4_vector_vg4_strided<0b00, 0b00, "luti4">;
 
 let Predicates = [HasSMEF8F16] in {
 defm FVDOT_VG2_M2ZZI_BtoH : sme2p1_multi_vec_array_vg2_index_f8f16<"fvdot", 0b11, 0b110, ZZ_b_mul_r, ZPR4b8>;
-defm FDOT_VG2_M2ZZI_BtoH : sme2p1_multi_vec_array_vg2_index_f8f16<"fdot", 0b11, 0b010, ZZ_b_mul_r, ZPR4b8>;
-defm FDOT_VG4_M4ZZI_BtoH : sme2p1_multi_vec_array_vg4_index_f8f16<"fdot", 0b100, ZZZZ_b_mul_r, ZPR4b8>;
+defm FDOT_VG2_M2ZZI_BtoH : sme2_fp8_fdot_index_za16_vg1x2<"fdot", int_aarch64_sme_fp8_fdot_lane_za16_vg1x2>;
+defm FDOT_VG4_M4ZZI_BtoH : sme2_fp8_fdot_index_za16_vg1x4<"fdot", int_aarch64_sme_fp8_fdot_lane_za16_vg1x4>;
 defm FDOT_VG2_M2ZZ_BtoH : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0010001, MatrixOp16, ZZ_b, ZPR4b8>;
 defm FDOT_VG4_M4ZZ_BtoH : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0110001, MatrixOp16, ZZZZ_b, ZPR4b8>;
 
@@ -1008,9 +1008,9 @@ defm FMOPA_MPPZZ_BtoH : sme2_fp8_fmopa_za16<"fmopa", int_aarch64_sme_fp8_fmopa_z
 } //[HasSMEF8F16]
 
 let Predicates = [HasSMEF8F32] in {
+defm FDOT_VG2_M2ZZI_BtoS : sme2_fp8_fdot_index_za32_vg1x2<"fdot", int_aarch64_sme_fp8_fdot_lane_za32_vg1x2>;
+defm FDOT_VG4_M4ZZI_BtoS : sme2_fp8_fdot_index_za32_vg1x4<"fdot", int_aarch64_sme_fp8_fdot_lane_za32_vg1x4>;
-defm FDOT_VG2_M2ZZI_BtoS : sme2_multi_vec_array_vg2_index_32b<"fdot", 0b01, 0b0111, ZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
-defm FDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"fdot", 0b0001, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
 defm FDOT_VG2_M2ZZ_BtoS : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0010011, MatrixOp32, ZZ_b, ZPR4b8>;
 defm FDOT_VG4_M4ZZ_BtoS : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0110011, MatrixOp32, ZZZZ_b, ZPR4b8>;

diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index b62ffcbebc652a..9f25749c83db83 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -244,6 +244,7 @@ class SME2_Tile_Movaz_Pat<string name, SDPatternOperator intrinsic, ValueType ou
     : Pat<(out_vt (intrinsic tile_imm:$tile, (i32 (tileslice MatrixIndexGPR32Op12_15:$base, index_ty:$offset)))),
           (!cast<Instruction>(name # _PSEUDO) $tile, $base, $offset)>;
 
+
 //===----------------------------------------------------------------------===//
 // SME pattern match helpers.
 //===----------------------------------------------------------------------===//
 
@@ -5793,3 +5794,91 @@ multiclass sme2_fmop4a_fp8_fp16_2way<string mnemonic> {
   // Multiple vectors
   def _M2Z2Z_BtoH : sme2_fp8_fp16_quarter_tile_outer_product<0b1, 0b1, mnemonic, ZZ_b_mul_r_Lo, ZZ_b_mul_r_Hi>;
 }
+
+// FP8 SME FDOT instructions
+
+multiclass sme2_fp8_fdot_index_za16_vg1x2<string mnemonic,
+                                          SDPatternOperator intrinsic> {
+  def NAME : sme2_multi_vec_array_vg2_index<0b11, {0b0,?,?,0b10,?}, MatrixOp16,
+                                            ZZ_b_mul_r, ZPR4b8,
+                                            VectorIndexH32b_timm, mnemonic>,
+             SMEPseudo2Instr<NAME, 1>{
+    let Uses=[FPMR, FPCR];
+
+    bits<3> i;
+    let Inst{11-10} = i{2-1};
+    let Inst{3} = i{0};
+  }
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm3], $Zn, $Zm$i",
+                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv, sme_elm_idx0_7:$imm3,
+                  ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexH32b_timm:$i), 0>;
+
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, sme_elm_idx0_7, ZZ_b_mul_r, ZPR4b8, VectorIndexH32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat<NAME, intrinsic, sme_elm_idx0_7, ZPR4b8, nxv16i8, VectorIndexH32b_timm, tileslice16>;
+}
+
+multiclass sme2_fp8_fdot_index_za16_vg1x4<string mnemonic,
+                                          SDPatternOperator intrinsic> {
+  def NAME : sme2_multi_vec_array_vg4_index<0b0, {0b1,?,?,0b100,?}, MatrixOp16,
+                                            ZZZZ_b_mul_r, ZPR4b8,
+                                            VectorIndexH32b_timm, mnemonic>,
+             SMEPseudo2Instr<NAME, 1> {
+    let Uses=[FPMR, FPCR];
+
+    bits<3> i;
+    let Inst{11-10} = i{2-1};
+    let Inst{3} = i{0};
+  }
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm3], $Zn, $Zm$i",
+                  (!cast<Instruction>(NAME) MatrixOp16:$ZAda, MatrixIndexGPR32Op8_11:$Rv,
+                  sme_elm_idx0_7:$imm3, ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexH32b_timm:$i), 0>;
+
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, sme_elm_idx0_7, ZZZZ_b_mul_r, ZPR4b8, VectorIndexH32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat<NAME, intrinsic, sme_elm_idx0_7, ZPR4b8, nxv16i8, VectorIndexH32b_timm, tileslice16>;
+}
+
+multiclass sme2_fp8_fdot_index_za32_vg1x2<string mnemonic,
+                                          SDPatternOperator intrinsic> {
+  def NAME : sme2_multi_vec_array_vg2_index<0b01, {0b0,?,?,0b111}, MatrixOp32, ZZ_b_mul_r, ZPR4b8,
+                                            VectorIndexS32b_timm, mnemonic>,
+             SMEPseudo2Instr<NAME, 1> {
+    let Uses=[FPMR, FPCR];
+
+    bits<2> i;
+    let Inst{11-10} = i;
+  }
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm3], $Zn, $Zm$i",
+                  (!cast<Instruction>(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, sme_elm_idx0_7:$imm3,
+                  ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexS32b_timm:$i), 0>;
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, sme_elm_idx0_7, ZZ_b_mul_r, ZPR4b8, VectorIndexS32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat<NAME, intrinsic, sme_elm_idx0_7, ZPR4b8, nxv16i8, VectorIndexS32b_timm, tileslice16>;
+}
+
+multiclass sme2_fp8_fdot_index_za32_vg1x4<string mnemonic,
+                                          SDPatternOperator intrinsic> {
+  def NAME : sme2_multi_vec_array_vg4_index<0b1, {0b0,?,?,0b0,0b001}, MatrixOp32, ZZZZ_b_mul_r,
+                                            ZPR4b8, VectorIndexS32b_timm, mnemonic>,
+             SMEPseudo2Instr<NAME, 1> {
+    let Uses=[FPMR, FPCR];
+
+    bits<2> i;
+    let Inst{11-10} = i;
+  }
+
+  def : InstAlias<mnemonic # "\t$ZAda[$Rv, $imm3], $Zn, $Zm$i",
+                  (!cast<Instruction>(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, sme_elm_idx0_7:$imm3,
+                  ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexS32b_timm:$i), 0>;
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo<NAME, sme_elm_idx0_7, ZZZZ_b_mul_r, ZPR4b8, VectorIndexS32b_timm, SMEMatrixArray>;
+
+  def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat<NAME, intrinsic, sme_elm_idx0_7, ZPR4b8, nxv16i8, VectorIndexS32b_timm, tileslice16>;
+}

diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-fp8-fdot.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-fp8-fdot.ll
new file mode 100644
index 00000000000000..7fcbc328aa085e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-fp8-fdot.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "^[ \t]*//.*$" --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2,+sme-f8f16,+sme-f8f32 -verify-machineinstrs -force-streaming < %s | FileCheck %s
+
+target triple = "aarch64-linux"
+
+define void @test_fdot16_1x2_indexed(i32 %slice.0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zm) #0 {
+; CHECK-LABEL: test_fdot16_1x2_indexed:
+; CHECK: mov w8, w0
+; CHECK: fdot za.h[w8, 7, vgx2], { z0.b, z1.b }, z2.b[1]
+; CHECK: ret
+  %slice = add i32 %slice.0, 7
+  call void @llvm.aarch64.sme.fp8.fdot.lane.za16.vg1x2(i32 %slice,
+                                                       <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2,
+                                                       <vscale x 16 x i8> %zm, i32 1)
+  ret void
+}
+
+define void @test_fdot16_1x4_indexed(i32 %slice.0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4,
+; CHECK-LABEL: test_fdot16_1x4_indexed:
+; CHECK: mov w8, w0
+; CHECK: fdot za.h[w8, 7, vgx4], { z0.b - z3.b }, z4.b[1]
+; CHECK: ret
+                                      <vscale x 16 x i8> %zm) #0 {
+  %slice = add i32 %slice.0, 7
+  call void @llvm.aarch64.sme.fp8.fdot.lane.za16.vg1x4(i32 %slice,
+                                                       <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4,
+                                                       <vscale x 16 x i8> %zm, i32 1)
+  ret void
+}
+
+define void @test_fdot32_1x2_indexed(i32 %slice.0,
+; CHECK-LABEL: test_fdot32_1x2_indexed:
+; CHECK: mov w8, w0
+; CHECK: fdot za.s[w8, 7, vgx2], { z0.b, z1.b }, z2.b[1]
+; CHECK: ret
+                                      <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2,
+                                      <vscale x 16 x i8> %zm) #0 {
+  %slice = add i32 %slice.0, 7
+  call void @llvm.aarch64.sme.fp8.fdot.lane.za32.vg1x2(i32 %slice,
+                                                       <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2,
+                                                       <vscale x 16 x i8> %zm, i32 1)
+  ret void
+}
+
+define void @test_fdot32_1x4_indexed(i32 %slice.0,
+; CHECK-LABEL: test_fdot32_1x4_indexed:
+; CHECK: mov w8, w0
+; CHECK: fdot za.s[w8, 7, vgx4], { z0.b - z3.b }, z4.b[1]
+; CHECK: ret
+                                      <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4,
+                                      <vscale x 16 x i8> %zm) #0 {
+  %slice = add i32 %slice.0, 7
+  call void @llvm.aarch64.sme.fp8.fdot.lane.za32.vg1x4(i32 %slice,
+                                                       <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4,
+                                                       <vscale x 16 x i8> %zm, i32 1)
+  ret void
+}
+
+attributes #0 = { "target-features" = "+sme,+sme-f8f32,+sme-f8f16" }
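For reference, here is a minimal usage sketch (not part of the patch) showing the four new ACLE intrinsics being called from C, mirroring the CodeGen test added above. It assumes a toolchain providing +sme2, +sme-f8f16 and +sme-f8f32, a streaming-mode caller that shares ZA, and a caller-supplied FP8 mode value in fpm; the function name and the lane indices used below are purely illustrative.

#include <arm_sme.h>

// Hypothetical example: accumulate FP8 dot products into ZA using the new
// lane-indexed intrinsics. The FPMR value is passed through as the final
// 'fpm' argument of each call.
void fdot_fp8_example(uint32_t slice, svmfloat8x2_t zn2, svmfloat8x4_t zn4,
                      svmfloat8_t zm, fpm_t fpm)
    __arm_streaming __arm_inout("za") {
  // ZA.S accumulators (sme-f8f32): lane index must be in [0, 3].
  svdot_lane_za32_mf8_vg1x2_fpm(slice, zn2, zm, 3, fpm);
  svdot_lane_za32_mf8_vg1x4_fpm(slice, zn4, zm, 3, fpm);
  // ZA.H accumulators (sme-f8f16): lane index must be in [0, 7].
  svdot_lane_za16_mf8_vg1x2_fpm(slice, zn2, zm, 7, fpm);
  svdot_lane_za16_mf8_vg1x4_fpm(slice, zn4, zm, 7, fpm);
}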