Trunk gcc mishandles the following volatile bitfield case on ARM targets:
$ cat a.c
extern void check(int);
typedef struct {
  volatile unsigned short a:8, b:8;
} BitStruct;
BitStruct bits = {1, 2};
int main ()
{
  check(bits.a);
  return 0;
}
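
To see why the miscompiled load changes the observed value, here is a
minimal layout sketch (an illustration only, not part of the reproducer;
it assumes a little-endian target such as arm-none-eabi and drops
volatile, since it only inspects storage):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct {
  unsigned short a:8, b:8;  /* same layout as BitStruct above */
} LayoutCheck;

int main(void)
{
  LayoutCheck bits = {1, 2};
  uint16_t raw;
  /* Little-endian packing puts a in bits [7:0] and b in bits [15:8],
     so the halfword in memory is 0x0201 = 513.  A correct read of
     bits.a must extract only the low byte.  */
  memcpy(&raw, &bits, sizeof raw);
  printf("raw = 0x%04x (%u), a = %u\n",
         (unsigned) raw, (unsigned) raw, (unsigned) bits.a);
  return 0;
}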
$ arm-none-eabi-gcc -v 2>&1 |grep "gcc version"
gcc version 4.7.0 20111024 (experimental) [trunk revision 180388] (GCC)
$ arm-none-eabi-gcc -S a.c -mthumb -mcpu=cortex-m3
$ grep -v "^\s*\." a.s
bits:
main:
	@ args = 0, pretend = 0, frame = 0
	@ frame_needed = 1, uses_anonymous_args = 0
	push	{r7, lr}
	add	r7, sp, #0
	movw	r3, #:lower16:bits
	movt	r3, #:upper16:bits
	ldrh	r3, [r3, #0]	@ movhi
	uxth	r3, r3		@ Should be uxtb here.  As a result,
				@ check() receives the whole halfword
				@ (0x0201 = 513) instead of the expected 1.
	mov	r0, r3
	bl	check
	mov	r3, #0
	mov	r0, r3
	pop	{r7, pc}

The root cause is that when -fstrict-volatile-bitfields is enabled (the
default on ARM), a volatile bitfield is not loaded as a bitfield even
when the bitfield is narrower than the load.  It is actually a
regression introduced by:

http://gcc.gnu.org/ml/gcc-patches/2011-03/msg01477.html

Patch to fix this:

2011-11-10  Joey Ye  <[email protected]>

	Fix volatile bitfield load
	* gcc/expr.c (expand_expr_real_1): Check bitfield size
	smaller than mode size.

Testcase:

2011-11-10  Joey Ye  <[email protected]>

	* gcc.dg/volatile-bitfields-1.c: New.

Index: testsuite/gcc.dg/volatile-bitfields-1.c
===================================================================
--- testsuite/gcc.dg/volatile-bitfields-1.c	(revision 0)
+++ testsuite/gcc.dg/volatile-bitfields-1.c	(revision 0)
@@ -0,0 +1,23 @@
+/* { dg-options "-fstrict-volatile-bitfields" } */
+/* { dg-do run } */
+
+extern int puts(const char *);
+extern void abort(void) __attribute__((noreturn));
+
+typedef struct {
+  volatile unsigned short a:8, b:8;
+} BitStruct;
+
+BitStruct bits = {1, 2};
+
+void check(int i, int j)
+{
+  if (i != 1 || j != 2) puts("FAIL"), abort();
+}
+
+int main ()
+{
+  check(bits.a, bits.b);
+
+  return 0;
+}

Index: expr.c
===================================================================
--- expr.c	(revision 181191)
+++ expr.c	(working copy)
@@ -9740,11 +9740,16 @@
 	    && modifier != EXPAND_CONST_ADDRESS
 	    && modifier != EXPAND_INITIALIZER)
 	   /* If the field is volatile, we always want an aligned
-	      access.  Only do this if the access is not already naturally
+	      access.  Do this in following two situations:
+	      1. the access is not already naturally
 	      aligned, otherwise "normal" (non-bitfield) volatile fields
-	      become non-addressable.  */
+	      become non-addressable.
+	      2. the bitsize is narrower than the access size.  Need
+	      to extract bitfields from the access.  */
 	   || (volatilep && flag_strict_volatile_bitfields > 0
-	       && (bitpos % GET_MODE_ALIGNMENT (mode) != 0))
+	       && (bitpos % GET_MODE_ALIGNMENT (mode) != 0
+		   || (mode1 != BLKmode
+		       && bitsize < GET_MODE_SIZE (mode1) * BITS_PER_UNIT)))
 	   /* If the field isn't aligned enough to fetch as a memref,
 	      fetch it as a bit field.  */
 	   || (mode1 != BLKmode
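
With the patch applied, the load of bits.a remains a single aligned
halfword access, but the field is now extracted from it (uxtb instead
of uxth).  In C terms the corrected sequence behaves like the following
sketch (the function name is illustrative, assuming the little-endian
layout described above):

#include <stdint.h>

/* What the corrected code does for bits.a: one volatile access at
   the declared width of the field's type (ldrh), followed by an
   8-bit extract of the field itself (uxtb).  */
static unsigned read_field_a (volatile const uint16_t *p)
{
  uint16_t v = *p;   /* single aligned halfword load */
  return v & 0xffu;  /* keep only field a, bits [7:0] */
}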
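
The new test can be run on its own with the usual dejagnu selector,
e.g. from the gcc build directory (adding the appropriate
--target_board option for a cross setup such as arm-none-eabi):

$ make check-gcc RUNTESTFLAGS="dg.exp=volatile-bitfields-1.c"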
