https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102926

--- Comment #5 from Ard Biesheuvel <ardb at kernel dot org> ---
Actually, I can reproduce the same behavior without the TLS register, where the
result of a MOVW/MOVT immediate assignment is also spilled to the stack.

    int ll[2];
    int foo(int);
    int bar(void)
    {
        int *l = ll;

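        /* the address of ll is needed both before and after the call to foo() */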
        return foo(l[0]) + l[1];
    }

will produce this for armv7-a (with r4-r11 fixed, i.e. no call-preserved
register is available to hold the address across the call)

        push    {fp, lr}
        movw    r3, #:lower16:.LANCHOR0
        movt    r3, #:upper16:.LANCHOR0
        add     fp, sp, #4
        sub     sp, sp, #8
        ldr     r0, [r3]
        str     r3, [fp, #-8]
        bl      foo
        ldr     r3, [fp, #-8]
        ldr     r3, [r3, #4]
        add     r0, r0, r3
        sub     sp, fp, #4
        @ sp needed
        pop     {fp, pc}

whereas pre-v7 gives me

        ldr     r3, .L4
        push    {fp, lr}
        add     fp, sp, #4
        ldr     r0, [r3]
        bl      foo
        ldr     r3, .L4
        ldr     r3, [r3, #4]
        add     r0, r0, r3
        pop     {fp, pc}

i.e., the address is simply reloaded from the literal pool rather than spilled.
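
For illustration only, rematerializing the MOVW/MOVT pair after the call
(instead of spilling r3) would give something along these lines; this is
hand-written, not actual compiler output:

        push    {fp, lr}
        movw    r3, #:lower16:.LANCHOR0
        movt    r3, #:upper16:.LANCHOR0
        add     fp, sp, #4
        ldr     r0, [r3]
        bl      foo
        movw    r3, #:lower16:.LANCHOR0
        movt    r3, #:upper16:.LANCHOR0
        ldr     r3, [r3, #4]
        add     r0, r0, r3
        pop     {fp, pc}

No stack slot is needed, so the str/ldr pair and the sp adjustments disappear;
the address is recomputed in two instructions instead of making a round trip
through memory.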
