/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file handles the architecture-dependent parts of initialization.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/e820.h>
#include <asm/dmi.h>
#include <asm/mtrr.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/topology.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

static struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
        .name = "Kernel code",
        .flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
        .name = "Kernel bss",
        .flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= specifies the location of the ELF core header stored
 * by the crashed kernel. This option is passed by the kexec loader
 * to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
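/*
 * Illustrative usage of the elfcorehdr= parameter above (the address
 * is hypothetical): the kexec loader appends something like
 * "elfcorehdr=0x2000000" to the capture kernel's command line;
 * memparse() also accepts K/M/G suffixes, so "elfcorehdr=32M" names
 * the same location.
 */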
#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn) << PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn << PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        reserve_bootmem(bootmap, bootmap_size);
}
#endif
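/*
 * Worked example for the bitmap sizing above (illustrative numbers):
 * with end_pfn = 0x100000 (4GB of 4KB pages), the bootmem bitmap needs
 * one bit per page, i.e. 0x100000/8 bytes = 128KB, so
 * bootmem_bootmap_pages() returns 32 pages and find_e820_area()
 * searches for a free 128KB window below end_pfn.
 */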
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long free_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        free_mem =
                ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, free_mem,
                                &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base > 0) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                               "for crashkernel (System RAM: %ldMB)\n",
                               (unsigned long)(crash_size >> 20),
                               (unsigned long)(crash_base >> 20),
                               (unsigned long)(free_mem >> 20));
                        crashk_res.start = crash_base;
                        crashk_res.end = crash_base + crash_size - 1;
                        reserve_bootmem(crash_base, crash_size);
                } else
                        printk(KERN_INFO "crashkernel reservation failed - "
                               "you have to specify a base address\n");
        }
}
#else
static inline void __init reserve_crashkernel(void)
{
}
#endif
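/*
 * Illustrative usage (hypothetical sizes): "crashkernel=128M@16M" asks
 * parse_crashkernel() for a 128MB reservation based at the 16MB mark.
 * With no "@base" given, crash_base stays 0 and the message above
 * reminds the user to supply one.
 */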
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
        machine_specific_memory_setup();
}
void __init setup_arch(char **cmdline_p)
{
        unsigned i;

        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL64", 4))
                efi_enabled = 1;
#endif

        memory_setup();
        copy_edd();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();
        finish_e820_parsing();

        early_gart_iommu_check();

        e820_register_active_regions(0, 0, -1UL);

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
        if (mtrr_trim_uncached_memory(end_pfn)) {
                e820_register_active_regions(0, 0, -1UL);
                end_pfn = e820_end_of_ram();
        }

        num_physpages = end_pfn;

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

        dmi_scan_machine();
#ifdef CONFIG_SMP
        /* setup to use the early static init tables during kernel startup */
        x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
        x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
#ifdef CONFIG_NUMA
        x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
#endif
#endif

#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif
        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        early_res_to_bootmem();
#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif

        if (efi_enabled)
                efi_reserve_bootmem();

        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
                unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_mem) {
                        reserve_bootmem_generic(ramdisk_image, ramdisk_size);
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start+ramdisk_size;
                } else {
                        /* Assumes everything on node 0 */
                        free_bootmem(ramdisk_image, ramdisk_size);
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_mem);
                        initrd_start = 0;
                }
        }
#endif
        reserve_crashkernel();

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
        ioapic_init_mappings();

        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
        e820_mark_nosave_regions();

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}
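/*
 * Leaves 0x80000002..0x80000004 each return 16 bytes of the 48-byte
 * brand string in eax..edx, yielding e.g. "AMD Opteron(tm) Processor
 * 250" (example string; the actual contents vary by part).
 */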
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
                       "D cache %dK (%d bytes/line)\n",
                       edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF);
                c->x86_cache_size = (ecx >> 24) + (edx >> 24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
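/*
 * Worked example for the decode above (illustrative register values):
 * with ecx = 0x40040140 from leaf 0x80000005, the L1 D cache is
 * ecx>>24 = 64K with ecx&0xFF = 64 bytes per line; leaf 0x80000006
 * reports the L2 size in KB in ecx>>16 the same way.
 */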
#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * On an AMD dual-core setup, the lower bits of the APIC id distinguish
 * the cores. Assumes the number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits;
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node = 0;
        unsigned apicid = hard_smp_processor_id();
#endif
        bits = c->x86_coreid_bits;

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 * - The CPU is missing memory and no node was created.
                 *   In that case try picking one from a nearby CPU.
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in. Assume
                 *   they are all increased by a constant offset, but in
                 *   the same order as the HT nodeids. If that doesn't
                 *   result in a usable node fall back to the path for
                 *   the previous case.
                 */
                int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
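/*
 * Worked example (illustrative): on a dual-core K8, x86_coreid_bits is
 * 1. For the CPU with initial APIC id 5 (binary 101), the low bit
 * gives cpu_core_id = 1, and phys_pkg_id(1) strips that bit to yield
 * phys_proc_id (socket) 2.
 */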
static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        c->x86_coreid_bits = bits;
#endif
}
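/*
 * Worked example (illustrative): a quad-core part might report
 * ecx = 0x3003 in leaf 0x80000008: (ecx & 0xff) + 1 = 4 cores, and
 * the ApicIdCoreIdSize field ((ecx >> 12) & 0xF) = 3 says three
 * APIC-id bits address cores within a socket.
 */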
#define ENABLE_C1E_MASK         0x18000000
#define CPUID_PROCESSOR_SIGNATURE       1
#define CPUID_XFAM              0x0ff00000
#define CPUID_XFAM_K8           0x00000000
#define CPUID_XFAM_10H          0x00100000
#define CPUID_XFAM_11H          0x00200000
#define CPUID_XMOD              0x000f0000
#define CPUID_XMOD_REV_F        0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
        u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

        switch (eax & CPUID_XFAM) {
        case CPUID_XFAM_K8:
                if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
                        break;
        case CPUID_XFAM_10H:
        case CPUID_XFAM_11H:
                rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
                if (lo & ENABLE_C1E_MASK)
                        return 1;
                break;
        default:
                /* err on the side of caution */
                return 1;
        }
        return 0;
}
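/*
 * Note on the mask above (interpretation, not from the original
 * source): ENABLE_C1E_MASK = 0x18000000 covers bits 27 and 28 of the
 * MSR read by rdmsr(MSR_K8_ENABLE_C1E, ...); if either C1E enable bit
 * is set, the lAPIC timer is reported broken per the comment above.
 */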
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd_mc(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1 << 8))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
                             level >= 0x0f58))
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        if (c->x86 == 0x10 || c->x86 == 0x11)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

        level = get_model_name(c);
        if (!level) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish models here, but this is
                           only a fallback anyway. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        if (c->extended_cpuid_level >= 0x80000006 &&
            (cpuid_edx(0x80000006) & 0xf000))
                num_cache_leaves = 4;
        else
                num_cache_leaves = 3;

        if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
                set_cpu_cap(c, X86_FEATURE_K8);

        /* MFENCE stops RDTSC speculation */
        set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

        if (amd_apic_timer_broken())
                disable_apic_timer = 1;
}
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {
                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                        ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }
#endif
}
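/*
 * Worked example (illustrative): a two-core, two-thread package
 * reports ebx[23:16] = 4 logical processors in leaf 1. Dividing by
 * x86_max_cores leaves 2 siblings per core, so one APIC-id bit selects
 * the thread and one more selects the core within the package.
 */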
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}
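/*
 * Worked example (illustrative): a dual-core die might report
 * eax = 0x04000121 in leaf 4: (eax >> 26) + 1 = 2 cores, while the
 * low bits (eax & 0x1f = 1) confirm a valid cache level is described.
 */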
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        int node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version
           employs for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE)
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);

                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;

                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1 << 11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1 << 12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}
/*
 * Do some early cpuid on the boot CPU to get the parameters that are
 * needed before check_bugs. Everything advanced is in identify_cpu
 * below.
 */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;

                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }
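        /*
         * Worked example for the decode above (illustrative value):
         * tfms = 0x000106A5 gives base family 6, base model 0xA,
         * stepping 5; since family 6 >= 0x6, the extended model bits
         * apply, so x86_model = 0xA + (0x1 << 4) = 0x1A. Only family
         * 0xf adds the extended family field on top.
         */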
#ifdef CONFIG_SMP
        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                early_init_amd(c);
                break;
        case X86_VENDOR_INTEL:
                early_init_intel(c);
                break;
        }
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        early_identify_cpu(c);

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;
        case X86_VENDOR_INTEL:
                init_intel(c);
                break;
        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] ^= cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        select_idle_routine(c);

        if (c != &boot_cpu_data)
                mtrr_ap_init();
#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}
static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk(KERN_INFO "%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");
}
static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;
        int cpu = 0, i;
        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned. Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static const char *const x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
                "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
                NULL, NULL, NULL, NULL,
                "constant_tsc", "up", NULL, "arch_perfmon",
                "pebs", "bts", NULL, "sync_rdtsc",
                "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
                "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", "extapic",
                "cr8_legacy", "abm", "sse4a", "misalignsse",
                "3dnowprefetch", "osvw", "ibs", "sse5",
                "skinit", "wdt", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Auxiliary (Linux-defined) */
                "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static const char *const x86_power_flags[] = {
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
                "stc",
                "100mhzsteps",
                "hwpstate",
                "",     /* tsc invariant mapped to constant_tsc */
                /* nothing */
        };
#ifdef CONFIG_SMP
        cpu = c->cpu_index;
#endif

        seq_printf(m, "processor\t: %u\n"
                   "vendor_id\t: %s\n"
                   "cpu family\t: %d\n"
                   "model\t\t: %d\n"
                   "model name\t: %s\n",
                   (unsigned)cpu,
                   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                   c->x86,
                   (int)c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");
        if (cpu_has(c, X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)cpu);

                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                           freq / 1000, (freq % 1000));
        }

        /* Cache size */
        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_SMP
        if (smp_num_siblings * c->x86_max_cores > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n",
                           cpus_weight(per_cpu(cpu_core_map, cpu)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
#endif
1180 "fpu_exception\t: yes\n"
1181 "cpuid level\t: %d\n"
1186 for (i = 0; i < 32*NCAPINTS; i++)
1187 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1188 seq_printf(m, " %s", x86_cap_flags[i]);
        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);
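        /*
         * Worked arithmetic for the line above (illustrative values):
         * with HZ = 250 and loops_per_jiffy = 4000000, the integer
         * part is 4000000/(500000/250) = 2000 and the fractional part
         * is (4000000/(5000/250)) % 100 = 0, printed as "2000.00".
         */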
        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);

        seq_printf(m, "power management:");
        for (i = 0; i < 32; i++) {
                if (c->x86_power & (1 << i)) {
                        if (i < ARRAY_SIZE(x86_power_flags) &&
                            x86_power_flags[i])
                                seq_printf(m, "%s%s",
                                           x86_power_flags[i][0] ? " " : "",
                                           x86_power_flags[i]);
                        else
                                seq_printf(m, " [%d]", i);
                }
        }

        seq_printf(m, "\n\n");

        return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)  /* just in case, cpu 0 is not the first */
                *pos = first_cpu(cpu_online_map);
        if ((*pos) < NR_CPUS && cpu_online(*pos))
                return &cpu_data(*pos);
        return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        *pos = next_cpu(*pos, cpu_online_map);
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};