Hi,
As per the subject, this patch rewrites the integer mls Neon intrinsics
to use the C expression a - b * c rather than inline assembly, allowing
for better scheduling and optimization. The signed variants compute the
result through the corresponding unsigned types so that any overflow in
the multiply-subtract remains well-defined.
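For illustration (a minimal sketch, not part of the patch; the function
name f is hypothetical), the rewritten intrinsic is still expected to
compile down to a single mls instruction at -O2, while the expression
now stays visible to the mid-end optimizers:

  #include <arm_neon.h>

  /* On aarch64 at -O2, GCC should match the vector a - b * c pattern
     and emit one integer mls instruction for this function.  */
  int8x8_t
  f (int8x8_t a, int8x8_t b, int8x8_t c)
  {
    return vmls_s8 (a, b, c);
  }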
Regression tested and bootstrapped on aarch64-none-linux-gnu with no
issues.
If ok, please commit to master (I don't have commit rights).
Thanks,
Jonathan
---
gcc/ChangeLog:
2021-01-14 Jonathan Wright <jonathan.wri...@arm.com>
* config/aarch64/arm_neon.h (vmls_s8): Use C rather than asm.
(vmls_s16): Likewise.
(vmls_s32): Likewise.
(vmls_u8): Likewise.
(vmls_u16): Likewise.
(vmls_u32): Likewise.
(vmlsq_s8): Likewise.
(vmlsq_s16): Likewise.
(vmlsq_s32): Likewise.
(vmlsq_u8): Likewise.
(vmlsq_u16): Likewise.
(vmlsq_u32): Likewise.
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 608e582d25820062a409310e7f3fc872660f8041..ad04eab1e753aa86f20a8f6cc2717368b1840ef7 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7968,72 +7968,45 @@ __extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
- int8x8_t __result;
- __asm__ ("mls %0.8b,%2.8b,%3.8b"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ uint8x8_t __result = (uint8x8_t) __a - (uint8x8_t) __b * (uint8x8_t) __c;
+ return (int8x8_t) __result;
}
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
- int16x4_t __result;
- __asm__ ("mls %0.4h,%2.4h,%3.4h"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ uint16x4_t __result = (uint16x4_t) __a - (uint16x4_t) __b * (uint16x4_t) __c;
+ return (int16x4_t) __result;
}
__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
- int32x2_t __result;
- __asm__ ("mls %0.2s,%2.2s,%3.2s"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ uint32x2_t __result = (uint32x2_t) __a - (uint32x2_t) __b * (uint32x2_t) __c;
+ return (int32x2_t) __result;
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
- uint8x8_t __result;
- __asm__ ("mls %0.8b,%2.8b,%3.8b"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __a - __b * __c;
}
__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
- uint16x4_t __result;
- __asm__ ("mls %0.4h,%2.4h,%3.4h"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __a - __b * __c;
}
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
- uint32x2_t __result;
- __asm__ ("mls %0.2s,%2.2s,%3.2s"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __a - __b * __c;
}
#define vmlsl_high_lane_s16(a, b, c, d) \
@@ -8565,72 +8538,45 @@ __extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
- int8x16_t __result;
- __asm__ ("mls %0.16b,%2.16b,%3.16b"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ uint8x16_t __result = (uint8x16_t) __a - (uint8x16_t) __b * (uint8x16_t) __c;
+ return (int8x16_t) __result;
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
- int16x8_t __result;
- __asm__ ("mls %0.8h,%2.8h,%3.8h"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ uint16x8_t __result = (uint16x8_t) __a - (uint16x8_t) __b * (uint16x8_t) __c;
+ return (int16x8_t) __result;
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
- int32x4_t __result;
- __asm__ ("mls %0.4s,%2.4s,%3.4s"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ uint32x4_t __result = (uint32x4_t) __a - (uint32x4_t) __b * (uint32x4_t) __c;
+ return (int32x4_t) __result;
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
- uint8x16_t __result;
- __asm__ ("mls %0.16b,%2.16b,%3.16b"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __a - __b * __c;
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
- uint16x8_t __result;
- __asm__ ("mls %0.8h,%2.8h,%3.8h"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __a - __b * __c;
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
- uint32x4_t __result;
- __asm__ ("mls %0.4s,%2.4s,%3.4s"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __a - __b * __c;
}
__extension__ extern __inline int16x8_t