http://gcc.gnu.org/bugzilla/show_bug.cgi?id=57856
Bug ID: 57856
Summary: for an uninitialized variable, gcc assumes it already
has a value instead of reporting an uninitialized warning.
Product: gcc
Version: 4.9.0
Status: UNCONFIRMED
Severity: normal
Priority: P3
Component: c
Assignee: unassigned at gcc dot gnu.org
Reporter: gang.chen at asianux dot com
Created attachment 30477
--> http://gcc.gnu.org/bugzilla/attachment.cgi?id=30477&action=edit
Related disassembly code.
For Linux kernel source code "mm/vmscan.c", function putback_lru_page(),
version is next-20130621.
Gcc assumes "lru == LRU_UNEVICTABLE" instead of reporting a warning (uninitialized
lru).
I got gcc source code from svn, "configure && make && make install".
[root@gchenlinux linux-next]# which gcc
/usr/local/bin/gcc
[root@gchenlinux linux-next]# gcc -v
Using built-in specs.
COLLECT_GCC=gcc
COLLECT_LTO_WRAPPER=/usr/local/libexec/gcc/x86_64-unknown-linux-gnu/4.9.0/lto-wrapper
Target: x86_64-unknown-linux-gnu
Configured with: ./configure
Thread model: posix
gcc version 4.9.0 20130704 (experimental) (GCC)
The related source code:
580 void putback_lru_page(struct page *page)
581 {
582 int lru;
583 int was_unevictable = PageUnevictable(page);
584
585 VM_BUG_ON(PageLRU(page));
586
587 redo:
588 ClearPageUnevictable(page);
589
590 if (page_evictable(page)) {
591 /*
592 * For evictable pages, we can use the cache.
593 * In event of a race, worst case is we end up with an
594 * unevictable page on [in]active list.
595 * We know how to handle that.
596 */
597 lru_cache_add(page);
598 } else {
599 /*
600 * Put unevictable pages directly on zone's unevictable
601 * list.
602 */
603 lru = LRU_UNEVICTABLE;
604 add_page_to_unevictable_list(page);
605 /*
606 * When racing with an mlock or AS_UNEVICTABLE clearing
607 * (page is unlocked) make sure that if the other thread
608 * does not observe our setting of PG_lru and fails
609 * isolation/check_move_unevictable_pages,
610 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
611 * the page back to the evictable list.
612 *
613 * The other side is TestClearPageMlocked() or
shmem_lock().
614 */
615 smp_mb();
616 }
617
618 /*
619 * page's status can change while we move it among lru. If an
evictable
620 * page is on unevictable list, it never be freed. To avoid that,
621 * check after we added it to the list, again.
622 */
623 if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
624 if (!isolate_lru_page(page)) {
625 put_page(page);
626 goto redo;
627 }
628 /* This means someone else dropped this page from LRU
629 * So, it will be freed or putback to LRU again. There is
630 * nothing to do here.
631 */
632 }
633
634 if (was_unevictable && lru != LRU_UNEVICTABLE)
635 count_vm_event(UNEVICTABLE_PGRESCUED);
636 else if (!was_unevictable && lru == LRU_UNEVICTABLE)
637 count_vm_event(UNEVICTABLE_PGCULLED);
638
639 put_page(page); /* drop ref from isolate */
640 }
/*
* Related disassembly code:
* make defconfig under x86_64 PC.
* make menuconfig (choose "Automount devtmpfs at /dev..." and KGDB)
* make V=1 EXTRA_CFLAGS=-W (not find related warnings, ref warn.log in
attachment)
* objdump -d vmlinux > vmlinux.S
* vi vmlinux.S
*
* The issue is: the compiler assumes "lru == LRU_UNEVICTABLE" instead of reporting a
warning (uninitialized lru)
*/
ffffffff810f3d20 <putback_lru_page>:
ffffffff810f3d20: 55 push %rbp
ffffffff810f3d21: 48 89 e5 mov %rsp,%rbp
ffffffff810f3d24: 41 55 push %r13
ffffffff810f3d26: 41 54 push %r12
ffffffff810f3d28: 4c 8d 67 02 lea 0x2(%rdi),%r12 ; for
ClearPageUnevictable(page);
ffffffff810f3d2c: 53 push %rbx
ffffffff810f3d2d: 4c 8b 2f mov (%rdi),%r13 ;
was_unevictable = PageUnevictable(page);
ffffffff810f3d30: 48 89 fb mov %rdi,%rbx
ffffffff810f3d33: 49 c1 ed 14 shr $0x14,%r13
ffffffff810f3d37: 41 83 e5 01 and $0x1,%r13d
ffffffff810f3d3b: eb 28 jmp ffffffff810f3d65
<putback_lru_page+0x45>
ffffffff810f3d3d: 0f 1f 00 nopl (%rax)
/* if(page_evictable(page)) { */
ffffffff810f3d40: e8 db c7 ff ff callq ffffffff810f0520
<lru_cache_add>
/* } */
/* if (lru == LRU_UNEVICTABLE && page_evictable(page)) { */
; assume lru ==
LRU_UNEVICTABLE
ffffffff810f3d45: 48 89 df mov %rbx,%rdi
ffffffff810f3d48: e8 a3 ff ff ff callq ffffffff810f3cf0
<page_evictable>
ffffffff810f3d4d: 85 c0 test %eax,%eax
ffffffff810f3d4f: 74 3d je ffffffff810f3d8e
<putback_lru_page+0x6e>
ffffffff810f3d51: 48 89 df mov %rbx,%rdi
ffffffff810f3d54: e8 87 fb ff ff callq ffffffff810f38e0
<isolate_lru_page>
ffffffff810f3d59: 85 c0 test %eax,%eax
ffffffff810f3d5b: 75 31 jne ffffffff810f3d8e
<putback_lru_page+0x6e>
ffffffff810f3d5d: 48 89 df mov %rbx,%rdi
ffffffff810f3d60: e8 3b c1 ff ff callq ffffffff810efea0
<put_page>
/* redo: */
/* ClearPageUnevictable(page); */
ffffffff810f3d65: f0 41 80 24 24 ef lock andb $0xef,(%r12)
/* if(page_evictable(page)) { */
ffffffff810f3d6b: 48 89 df mov %rbx,%rdi
ffffffff810f3d6e: e8 7d ff ff ff callq ffffffff810f3cf0
<page_evictable>
ffffffff810f3d73: 85 c0 test %eax,%eax
ffffffff810f3d75: 48 89 df mov %rbx,%rdi
ffffffff810f3d78: 75 c6 jne ffffffff810f3d40
<putback_lru_page+0x20>
/* else { */
; assume lru ==
LRU_UNEVICTABLE
ffffffff810f3d7a: e8 c1 c7 ff ff callq ffffffff810f0540
<add_page_to_unevictable_list>
ffffffff810f3d7f: 0f ae f0 mfence
/* } */
/* if (lru == LRU_UNEVICTABLE && page_evictable(page)) { */
; assume lru ==
LRU_UNEVICTABLE
ffffffff810f3d82: 48 89 df mov %rbx,%rdi
ffffffff810f3d85: e8 66 ff ff ff callq ffffffff810f3cf0
<page_evictable>
ffffffff810f3d8a: 85 c0 test %eax,%eax
ffffffff810f3d8c: 75 c3 jne ffffffff810f3d51
<putback_lru_page+0x31>
/* } */
/* if (was_unevictable && lru != LRU_UNEVICTABLE) */
/* ... */
/* else if (!was_unevictable && lru == LRU_UNEVICTABLE) */
ffffffff810f3d8e: 4d 85 ed test %r13,%r13
; !was_unevictable, assume lru == LRU_UNEVICTABLE
ffffffff810f3d91: 75 09 jne ffffffff810f3d9c
<putback_lru_page+0x7c>
ffffffff810f3d93: 65 48 ff 04 25 68 f0 incq %gs:0xf068 ;
count_vm_event(UNEVICTABLE_PGCULLED);
; "incq
%gs:0xf078" for count_vm_event(UNEVICTABLE_PGRESCUED);
ffffffff810f3d9a: 00 00
/* put_page() */
ffffffff810f3d9c: 48 89 df mov %rbx,%rdi
ffffffff810f3d9f: e8 fc c0 ff ff callq ffffffff810efea0
<put_page>
ffffffff810f3da4: 5b pop %rbx
ffffffff810f3da5: 41 5c pop %r12
ffffffff810f3da7: 41 5d pop %r13
ffffffff810f3da9: 5d pop %rbp
ffffffff810f3daa: c3 retq
ffffffff810f3dab: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
ffffffff81dc4d8a: c3 retq