http://www.cs.fsu.edu/~baker/devices/lxr/http/source/linux/arch/x86/kernel/cpu/common.c

122 
123 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
124 {
125         unsigned int n, dummy, ecx, edx, l2size;
126 
127         n = cpuid_eax(0x80000000);
128 
129         if (n >= 0x80000005) {
130                 cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
131                 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
132                         edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
133                 c->x86_cache_size=(ecx>>24)+(edx>>24);  
134         }
135 
136         if (n < 0x80000006)     /* Some chips just has a large L1. */
137                 return;
138 
139         ecx = cpuid_ecx(0x80000006);
140         l2size = ecx >> 16;
141         
142         /* do processor-specific cache resizing */
143         if (this_cpu->c_size_cache)
144                 l2size = this_cpu->c_size_cache(c,l2size);
145 
146         /* Allow user to override all this if necessary. */
147         if (cachesize_override != -1)
148                 l2size = cachesize_override;
149 
150         if ( l2size == 0 )
151                 return;         /* Again, no L2 cache is possible */
152 
153         c->x86_cache_size = l2size;
154 
155         printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
156                l2size, ecx & 0xFF);
157 }
158 
159 /* Naming convention should be: <Name> [(<Codename>)] */
161 /* This table is only used if init_<vendor>() below doesn't set the name; */
161 /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
162 
163 /* Look up CPU names by table lookup. */
164 static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
165 {
166         struct cpu_model_info *info;
167 
168         if ( c->x86_model >= 16 )
169                 return NULL;    /* Range check */
170 
171         if (!this_cpu)
172                 return NULL;
173 
174         info = this_cpu->c_models;
175 
176         while (info && info->family) {
177                 if (info->family == c->x86)
178                         return info->model_names[c->x86_model];
179                 info++;
180         }
181         return NULL;            /* Not found */
182 }
183 
184 
185 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
186 {
187         char *v = c->x86_vendor_id;
188         int i;
189         static int printed;
190 
191         for (i = 0; i < X86_VENDOR_NUM; i++) {
192                 if (cpu_devs[i]) {
193                         if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
194                             (cpu_devs[i]->c_ident[1] && 
195                              !strcmp(v,cpu_devs[i]->c_ident[1]))) {
196                                 c->x86_vendor = i;
197                                 if (!early)
198                                         this_cpu = cpu_devs[i];
199                                 return;
200                         }
201                 }
202         }
203         if (!printed) {
204                 printed++;
205                 printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
206                 printk(KERN_ERR "CPU: Your system may be unstable.\n");
207         }
208         c->x86_vendor = X86_VENDOR_UNKNOWN;
209         this_cpu = &default_cpu;
210 }
211 
212 
213 static int __init x86_fxsr_setup(char * s)
214 {
215         setup_clear_cpu_cap(X86_FEATURE_FXSR);
216         setup_clear_cpu_cap(X86_FEATURE_XMM);
217         return 1;
218 }
219 __setup("nofxsr", x86_fxsr_setup);
220 
221 
222 static int __init x86_sep_setup(char * s)
223 {
224         setup_clear_cpu_cap(X86_FEATURE_SEP);
225         return 1;
226 }
227 __setup("nosep", x86_sep_setup);
228 



311 /* Do minimum CPU detection early.
312    Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
313    The others are not touched to avoid unwanted side effects.
314 
315    WARNING: this function is only called on the BP.  Don't add code here
316    that is supposed to run on all CPUs. */
317 static void __init early_cpu_detect(void)
318 {
319         struct cpuinfo_x86 *c = &boot_cpu_data;
320 
321         c->x86_cache_alignment = 32;
322         c->x86_clflush_size = 32;
323 
324         if (!have_cpuid_p())
325                 return;
326 
327         cpu_detect(c);
328 
329         get_cpu_vendor(c, 1);
330 
331         switch (c->x86_vendor) {
332         case X86_VENDOR_AMD:
333                 early_init_amd(c);
334                 break;
335         case X86_VENDOR_INTEL:
336                 early_init_intel(c);
337                 break;
338         }
339 
340         early_get_cap(c);
341 }
342 



Notice how the above function has evolved into the following (2.6.31-rc9):

/*
 * Fill in the basic identification fields of *c from CPUID:
 * vendor string, family, model, stepping, and CLFLUSH geometry.
 */
void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	/* Default family when leaf 1 is unavailable. */
	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;

		/* Family 0xf: add the extended-family field (bits 27:20). */
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		/* Family >= 6: extended model (bits 19:16) forms the high nibble. */
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;

		/* CLFLUSH present (cap bit 19): size field is in 8-byte units. */
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}


/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	/* Conservative architectural defaults until CPUID refines them. */
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	/* Vendor-specific early fixups, if the driver provides any. */
	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

#ifdef CONFIG_SMP
	c->cpu_index = boot_cpu_id;
#endif
	filter_cpuid_features(c, false);
}
    657 


Reply via email to