This patch fixes the mve_vec_setv2di_internal pattern in the Arm backend,
which printed the wrong input scalar register pair when inserting into lane 1.
Added a new test that forces -mfloat-abi=hard so that the generated assembly
can be checked directly (via check-function-bodies).
Regression tested on arm-none-eabi with
-march=armv8.1-m.main+mve/-mfloat-abi=hard/-mfpu=auto.
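As a minimal illustration (taken from the new test, not an exhaustive list of
the affected cases), under the hard-float ABI the 64-bit scalar argument is
passed in r0/r1 and the vector argument/result in q0, so inserting into lane 1
must write the high doubleword d1:

  #include <arm_mve.h>

  uint64x2_t
  fn2 (uint64_t a, uint64x2_t b)
  {
    return vsetq_lane_u64 (a, b, 1);
  }

which with this patch is expected to assemble to:

  vmov	d1, r0, r1
  bx	lr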
gcc/ChangeLog:

	PR target/115611
	* config/arm/mve.md (mve_vec_setv2di_internal): Fix printing of
	input scalar register pair when lane = 1.

gcc/testsuite/ChangeLog:

	* gcc.target/arm/mve/intrinsics/vsetq_lane_su64.c: New test.

diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index 4b4d6298ffb1899dc089eb52b03500e6e6236c31..706a45c7d6652677f3ec993a77646e3845eb8f8d 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -6505,7 +6505,7 @@ (define_insn "mve_vec_setv2di_internal"
if (elt == 0)
return "vmov\t%e0, %Q1, %R1";
else
- return "vmov\t%f0, %J1, %K1";
+ return "vmov\t%f0, %Q1, %R1";
}
[(set_attr "type" "mve_move")])
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_su64.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_su64.c
new file mode 100644
index 0000000000000000000000000000000000000000..5aa3bc9a76a06d7151ff6a844807afe666bbeacb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vsetq_lane_su64.c
@@ -0,0 +1,63 @@
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-require-effective-target arm_hard_ok } */
+/* { dg-additional-options "-mfloat-abi=hard -O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+#include "arm_mve.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+**fn1:
+** vmov d0, r0, r1
+** bx lr
+*/
+uint64x2_t
+fn1 (uint64_t a, uint64x2_t b)
+{
+ return vsetq_lane_u64 (a, b, 0);
+}
+
+/*
+**fn2:
+** vmov d1, r0, r1
+** bx lr
+*/
+uint64x2_t
+fn2 (uint64_t a, uint64x2_t b)
+{
+ return vsetq_lane_u64 (a, b, 1);
+}
+
+/*
+**fn3:
+** vmov d0, r0, r1
+** bx lr
+*/
+int64x2_t
+fn3 (int64_t a, int64x2_t b)
+{
+ return vsetq_lane_s64 (a, b, 0);
+}
+
+/*
+**fn4:
+** vmov d1, r0, r1
+** bx lr
+*/
+int64x2_t
+fn4 (int64_t a, int64x2_t b)
+{
+ return vsetq_lane_s64 (a, b, 1);
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+/* { dg-final { scan-assembler-not "__ARM_undef" } } */
+