Hello,

This patch fixes a bug in the arm backend where GCC generates invalid
LDRD instructions. The LDRD instruction requires the first transfer
register to be even, but GCC attempts to use an odd register in this
case. For example, with the following C code:
struct c {
  double a;
} __attribute((aligned)) __attribute((packed));

struct c d;
struct c f(struct c);
void e() { f(d); }

The struct d is passed in registers r1 and r2 to the function f, and GCC
attempts to do this with an LDRD instruction when compiling with
-march=armv7-a on a soft-float toolchain.

The fix is analogous to the corresponding one for STRD in the same
function:
https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=52057dc4ac5295caebf83147f688d769c93cbc8d

Testing:
 - New unit tests which pass after applying the patch.
 - Tested on an x64 -> arm-none-eabi cross.
 - Bootstrapped and regtested on arm-none-linux-gnueabihf (in both thumb
   and arm modes).

OK for master?

Thanks,
Alex

---

gcc/ChangeLog:

2020-05-15  Alex Coplan  <alex.cop...@arm.com>

        * config/arm/arm.c (output_move_double): Fix codegen when loading into
        a register pair with an odd base register.

gcc/testsuite/ChangeLog:

2020-05-15  Alex Coplan  <alex.cop...@arm.com>

        * gcc.c-torture/compile/packed-aligned-1.c: New test.
        * gcc.c-torture/execute/packed-aligned.c: New test.
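A short summary of the restriction for reviewers (my paraphrase of the
new predicate in output_move_double; the comment is mine and not taken
from arm.c):

  /* In Arm state, LDRD transfers the pair Rt, Rt+1 and requires the
     first transfer register Rt to be even; an odd Rt is invalid.
     Thumb-2 LDRD names its two destination registers independently, so
     no evenness check is needed there.  When this guard is false,
     output_move_double falls back to LDM-based sequences, which have no
     such restriction.  */
  const bool can_ldrd = TARGET_LDRD && (TARGET_THUMB2 || (reg0 % 2 == 0));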
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index d50781953c0..0ac1961696e 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -19551,6 +19551,7 @@ output_move_double (rtx *operands, bool emit, int *count)
   if (code0 == REG)
     {
       unsigned int reg0 = REGNO (operands[0]);
+      const bool can_ldrd = TARGET_LDRD && (TARGET_THUMB2 || (reg0 % 2 == 0));
 
       otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
 
@@ -19562,7 +19563,7 @@ output_move_double (rtx *operands, bool emit, int *count)
 
           if (emit)
             {
-              if (TARGET_LDRD
+              if (can_ldrd
                   && !(fix_cm3_ldrd && reg0 == REGNO(XEXP (operands[1], 0))))
                 output_asm_insn ("ldrd%?\t%0, [%m1]", operands);
               else
@@ -19571,7 +19572,7 @@ output_move_double (rtx *operands, bool emit, int *count)
           break;
 
         case PRE_INC:
-          gcc_assert (TARGET_LDRD);
+          gcc_assert (can_ldrd);
           if (emit)
             output_asm_insn ("ldrd%?\t%0, [%m1, #8]!", operands);
           break;
@@ -19579,7 +19580,7 @@ output_move_double (rtx *operands, bool emit, int *count)
         case PRE_DEC:
           if (emit)
             {
-              if (TARGET_LDRD)
+              if (can_ldrd)
                 output_asm_insn ("ldrd%?\t%0, [%m1, #-8]!", operands);
               else
                 output_asm_insn ("ldmdb%?\t%m1!, %M0", operands);
@@ -19589,7 +19590,7 @@ output_move_double (rtx *operands, bool emit, int *count)
         case POST_INC:
           if (emit)
             {
-              if (TARGET_LDRD)
+              if (can_ldrd)
                 output_asm_insn ("ldrd%?\t%0, [%m1], #8", operands);
               else
                 output_asm_insn ("ldmia%?\t%m1!, %M0", operands);
@@ -19597,7 +19598,7 @@ output_move_double (rtx *operands, bool emit, int *count)
           break;
 
         case POST_DEC:
-          gcc_assert (TARGET_LDRD);
+          gcc_assert (can_ldrd);
           if (emit)
             output_asm_insn ("ldrd%?\t%0, [%m1], #-8", operands);
           break;
@@ -19619,6 +19620,7 @@ output_move_double (rtx *operands, bool emit, int *count)
                       /* Registers overlap so split out the increment.  */
                       if (emit)
                         {
+                          gcc_assert (can_ldrd);
                           output_asm_insn ("add%?\t%1, %1, %2", otherops);
                           output_asm_insn ("ldrd%?\t%0, [%1] @split", otherops);
                         }
@@ -19630,10 +19632,11 @@ output_move_double (rtx *operands, bool emit, int *count)
                       /* Use a single insn if we can.
                          FIXME: IWMMXT allows offsets larger than ldrd can
                          handle, fix these up with a pair of ldr.  */
-                      if (TARGET_THUMB2
+                      if (can_ldrd
+                          && (TARGET_THUMB2
                           || !CONST_INT_P (otherops[2])
                           || (INTVAL (otherops[2]) > -256
-                              && INTVAL (otherops[2]) < 256))
+                              && INTVAL (otherops[2]) < 256)))
                         {
                           if (emit)
                             output_asm_insn ("ldrd%?\t%0, [%1, %2]!", otherops);
@@ -19656,10 +19659,11 @@ output_move_double (rtx *operands, bool emit, int *count)
                   /* Use a single insn if we can.
                      FIXME: IWMMXT allows offsets larger than ldrd can
                      handle, fix these up with a pair of ldr.  */
-                  if (TARGET_THUMB2
+                  if (can_ldrd
+                      && (TARGET_THUMB2
                       || !CONST_INT_P (otherops[2])
                       || (INTVAL (otherops[2]) > -256
-                          && INTVAL (otherops[2]) < 256))
+                          && INTVAL (otherops[2]) < 256)))
                     {
                       if (emit)
                         output_asm_insn ("ldrd%?\t%0, [%1], %2", otherops);
@@ -19690,7 +19694,7 @@ output_move_double (rtx *operands, bool emit, int *count)
               operands[1] = otherops[0];
               if (emit)
                 {
-                  if (TARGET_LDRD)
+                  if (can_ldrd)
                     output_asm_insn ("ldrd%?\t%0, [%1]", operands);
                   else
                     output_asm_insn ("ldmia%?\t%1, %M0", operands);
@@ -19735,7 +19739,7 @@ output_move_double (rtx *operands, bool emit, int *count)
             }
           otherops[0] = gen_rtx_REG(SImode, REGNO(operands[0]) + 1);
           operands[1] = otherops[0];
-          if (TARGET_LDRD
+          if (can_ldrd
               && (REG_P (otherops[2])
                   || TARGET_THUMB2
                   || (CONST_INT_P (otherops[2])
@@ -19796,7 +19800,7 @@ output_move_double (rtx *operands, bool emit, int *count)
           if (count)
             *count = 2;
 
-          if (TARGET_LDRD)
+          if (can_ldrd)
             return "ldrd%?\t%0, [%1]";
 
           return "ldmia%?\t%1, %M0";
diff --git a/gcc/testsuite/gcc.c-torture/compile/packed-aligned-1.c b/gcc/testsuite/gcc.c-torture/compile/packed-aligned-1.c
new file mode 100644
index 00000000000..9f0923e29ee
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/packed-aligned-1.c
@@ -0,0 +1,11 @@
+struct c {
+  double a;
+} __attribute((packed)) __attribute((aligned));
+
+void f(struct c *, struct c);
+
+void g(struct c *ptr)
+{
+  ptr++;
+  f(ptr, *ptr);
+}
diff --git a/gcc/testsuite/gcc.c-torture/execute/packed-aligned.c b/gcc/testsuite/gcc.c-torture/execute/packed-aligned.c
new file mode 100644
index 00000000000..f768af0ab02
--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/execute/packed-aligned.c
@@ -0,0 +1,28 @@
+struct c {
+  double a;
+} __attribute((packed)) __attribute((aligned));
+
+extern void abort(void);
+
+double g_expect = 32.25;
+
+void f(unsigned x, struct c y)
+{
+  if (x != 0)
+    abort();
+
+  if (y.a != g_expect)
+    abort();
+}
+
+struct c e = { 64.25 };
+
+int main(void)
+{
+  struct c d = { 32.25 };
+  f(0, d);
+
+  g_expect = 64.25;
+  f(0, e);
+  return 0;
+}
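
If it's helpful for review, the new tests can be run on their own from
the build tree with something along these lines (illustrative only; the
exact invocation depends on how the build/cross environment is set up):

  make check-gcc RUNTESTFLAGS="compile.exp=packed-aligned-1.c execute.exp=packed-aligned.c"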