|
/*
 * Section-placement annotations for hotplug init/exit code and data.
 * Each macro tags a symbol with __section(...) so the code/data lands in
 * a dedicated ELF section (presumably so the linker script can group or
 * discard it depending on the hotplug config -- confirm against the
 * kernel's vmlinux.lds).  __cold marks the functions as rarely executed.
 */

/* Used for HOTPLUG */
#define __devinit        __section(.devinit.text) __cold
#define __devinitdata    __section(.devinit.data)
#define __devinitconst   __section(.devinit.rodata)
#define __devexit        __section(.devexit.text) __exitused __cold
#define __devexitdata    __section(.devexit.data)
#define __devexitconst   __section(.devexit.rodata)

/* Used for HOTPLUG_CPU */
#define __cpuinit        __section(.cpuinit.text) __cold
#define __cpuinitdata    __section(.cpuinit.data)
#define __cpuinitconst   __section(.cpuinit.rodata)
#define __cpuexit        __section(.cpuexit.text) __exitused __cold
#define __cpuexitdata    __section(.cpuexit.data)
#define __cpuexitconst   __section(.cpuexit.rodata)

/* Used for MEMORY_HOTPLUG */
#define __meminit        __section(.meminit.text) __cold
#define __meminitdata    __section(.meminit.data)
#define __meminitconst   __section(.meminit.rodata)
#define __memexit        __section(.memexit.text) __exitused __cold
#define __memexitdata    __section(.memexit.data)
#define __memexitconst   __section(.memexit.rodata)

/*
 * For assembly routines: raw .section directives.  Flags follow the GNU
 * assembler convention: "ax" = allocatable + executable, "aw" =
 * allocatable + writable, "a" = allocatable read-only.  __FINIT /
 * __FINITDATA pop back to the previously active section via .previous.
 */
#define __HEAD		.section	".head.text","ax"
#define __INIT		.section	".init.text","ax"
#define __FINIT		.previous

#define __INITDATA	.section	".init.data","aw"
#define __INITRODATA	.section	".init.rodata","a"
#define __FINITDATA	.previous

#define __DEVINIT	.section	".devinit.text", "ax"
#define __DEVINITDATA	.section	".devinit.data", "aw"
#define __DEVINITRODATA	.section	".devinit.rodata", "a"

#define __CPUINIT	.section	".cpuinit.text", "ax"
#define __CPUINITDATA	.section	".cpuinit.data", "aw"

/* initcalls are now grouped by functionality into separate
 * subsections. Ordering inside the subsections is determined
 * by link order.
 * For backwards compatibility, initcall() puts the call in
 * the device init subsection.
 *
 * The `id' arg to __define_initcall() is needed so that multiple initcalls
 * can point at the same handler without causing duplicate-symbol build errors.
 */

/*
 * Emit a pointer to @fn into the ".initcall<level>.init" section.  The
 * string @level selects the subsection (and thus the boot-time ordering);
 * @id is appended to the generated symbol name purely to keep it unique.
 */
#define __define_initcall(level,fn,id) \
	static initcall_t __initcall_##fn##id __used \
	__attribute__((__section__(".initcall" level ".init"))) = fn

/*
 * Early initcalls run before initializing SMP.
 *
 * This only exists for built-in code, not for modules.
 */
#define pure_initcall(fn)		__define_initcall("0",fn,0)

#define core_initcall(fn)		__define_initcall("1",fn,1)
#define core_initcall_sync(fn)		__define_initcall("1s",fn,1s)
#define postcore_initcall(fn)		__define_initcall("2",fn,2)
#define postcore_initcall_sync(fn)	__define_initcall("2s",fn,2s)
#define arch_initcall(fn)		__define_initcall("3",fn,3)
#define arch_initcall_sync(fn)		__define_initcall("3s",fn,3s)
#define subsys_initcall(fn)		__define_initcall("4",fn,4)
#define subsys_initcall_sync(fn)	__define_initcall("4s",fn,4s)
#define fs_initcall(fn)			__define_initcall("5",fn,5)
#define fs_initcall_sync(fn)		__define_initcall("5s",fn,5s)
#define rootfs_initcall(fn)		__define_initcall("rootfs",fn,rootfs)
#define device_initcall(fn)		__define_initcall("6",fn,6)
#define device_initcall_sync(fn)	__define_initcall("6s",fn,6s)
#define late_initcall(fn)		__define_initcall("7",fn,7)
#define late_initcall_sync(fn)		__define_initcall("7s",fn,7s)

/* Backwards compatibility: a bare initcall() lands in the device (6) group. */
#define __initcall(fn) device_initcall(fn)

/**
 * do_one_initcall - invoke a single boot-time initcall
 * @fn: function pointer harvested from the .initcall*.init sections
 *
 * Returns @fn's own return value.  When initcall_debug is set, the call
 * is timed and fed to the boot tracer, and its duration is reported in
 * approximate microseconds (ns >> 10).  After the call, the preempt
 * count and IRQ state are checked and repaired so that one misbehaving
 * initcall cannot derail the rest of the boot sequence.
 */
int do_one_initcall(initcall_t fn)
{
	int count = preempt_count();	/* snapshot to detect imbalance below */
	ktime_t calltime, delta, rettime;
	char msgbuf[64];
	struct boot_trace_call call;
	struct boot_trace_ret ret;

	if (initcall_debug) {
		call.caller = task_pid_nr(current);
		printk("calling %pF @ %i\n", fn, call.caller);
		calltime = ktime_get();
		trace_boot_call(&call, fn);
		enable_boot_trace();
	}

	ret.result = fn();

	if (initcall_debug) {
		disable_boot_trace();
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* ns >> 10 is a cheap approximation of usecs (divide by 1024) */
		ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
		trace_boot_ret(&ret, fn);
		printk("initcall %pF returned %d after %Ld usecs\n", fn,
			ret.result, ret.duration);
	}

	msgbuf[0] = 0;

	/* -ENODEV is a routine "not present" answer, so it is not reported */
	if (ret.result && ret.result != -ENODEV && initcall_debug)
		sprintf(msgbuf, "error code %d ", ret.result);

	if (preempt_count() != count) {
		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
		/* NOTE(review): relies on preempt_count() expanding to an
		 * lvalue here -- confirm against preempt.h in this tree. */
		preempt_count() = count;
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	if (msgbuf[0]) {
		printk("initcall %pF returned with %s\n", fn, msgbuf);
	}

	return ret.result;
}

/* Boundary symbols around the initcall pointer arrays -- presumably
 * emitted by the linker script; confirm against vmlinux.lds. */
extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];

/*
 * Run every initcall that was not already handled in the early pass
 * (i.e. everything from __early_initcall_end up to __initcall_end),
 * in the order the linker laid them out.
 */
static void __init do_initcalls(void)
{
	initcall_t *call;

	for (call = __early_initcall_end; call < __initcall_end; call++)
		do_one_initcall(*call);

	/* Make sure there is no pending stuff from the initcall sequence */
	flush_scheduled_work();
}

/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
	rcu_init_sched(); /* needed by module_init stage. */
	init_workqueues();
	cpuset_init_smp();
	usermodehelper_init();
	driver_init();
	init_irq_proc();
	do_initcalls();
}

static int __init kernel_init(void * unused)
{
	lock_kernel();
	/*
	 * init can run on any cpu.
847 */ 848 set_cpus_allowed_ptr(current, cpu_all_mask); 849 /* 850 * Tell the world that we're going to be the grim 851 * reaper of innocent orphaned children. 852 * 853 * We don't want people to have to make incorrect 854 * assumptions about where in the task array this 855 * can be found. 856 */ 857 init_pid_ns.child_reaper = current; 858 859 cad_pid = task_pid(current); 860 861 smp_prepare_cpus(setup_max_cpus); 862 863 do_pre_smp_initcalls(); 864 start_boot_trace(); 865 866 smp_init(); 867 sched_init_smp(); 868 869 do_basic_setup(); 870 ========================= 451 static noinline void __init_refok rest_init(void) 452 __releases(kernel_lock) 453 { 454 int pid; 455 456 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); 457 numa_default_policy(); 458 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); 459 kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns); 460 unlock_kernel(); 461 ========================= 531 asmlinkage void __init start_kernel(void) 532 { 533 char * command_line; 534 extern struct kernel_param __start___param[], __stop___param[]; 535 536 smp_setup_processor_id(); 537 538 /* 539 * Need to run as early as possible, to initialize the 540 * lockdep hash: 541 */ 542 lockdep_init(); 543 debug_objects_early_init(); 544 545 /* 546 * Set up the the initial canary ASAP: 547 */ <snip> <snip> <snip> 678 /* rootfs populating might need page-writeback */ 679 page_writeback_init(); 680 #ifdef CONFIG_PROC_FS 681 proc_root_init(); 682 #endif 683 cgroup_init(); 684 cpuset_init(); 685 taskstats_init_early(); 686 delayacct_init(); 687 688 check_bugs(); 689 690 acpi_early_init(); /* before LAPIC and SMP init */ 691 692 ftrace_init(); 693 694 /* Do the rest non-__init'ed, we're now alive */ 695 rest_init(); 696 } 697 |
