https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83543
--- Comment #6 from Martin Sebor <msebor at gcc dot gnu.org> ---
(In reply to Martin Sebor from comment #5)
The cutoff between (a) and (b) also depends on whether a function is being
optimized for speed or for size, so at -Os, the x86_64 target also uses
(b) for all sizes.
Here's a test case and its output with an x86_64 compiler:
$ cat t.c && gcc -S -Wall -fdump-tree-optimized=/dev/stdout t.c
#pragma GCC optimize ("O2")
int f (void)
{
struct A { char a[4]; } a = { "123" };
return __builtin_strlen (a.a);
}
#pragma GCC optimize ("Os")
/* Compiled at -Os (size): identical code to f(), but the dump below shows
   the __builtin_strlen call is kept rather than folded to a constant. */
int g (void)
{
struct B { char b[4]; } b = { "123" }; /* same 4-byte array and initializer as in f() */
return __builtin_strlen (b.b); /* remains an actual strlen call in the -Os dump */
}
;; Function f (f, funcdef_no=0, decl_uid=1957, cgraph_uid=0, symbol_order=0)
__attribute__((optimize ("O2")))
f ()
{
<bb 2> [local count: 1073741825]:
return 3;
}
;; Function g (g, funcdef_no=1, decl_uid=1963, cgraph_uid=1, symbol_order=1)
__attribute__((optimize ("O2", "Os")))
g ()
{
struct B b;
long unsigned int _1;
int _4;
<bb 2> [local count: 1073741825]:
b.b = "123";
_1 = __builtin_strlen (&b.b);
_4 = (int) _1;
b ={v} {CLOBBER};
return _4;
}