ARM: default machine descriptor for multiplatform
[linux-2.6-microblaze.git] / arch / arm / kernel / setup.c
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_platform.h>
22 #include <linux/init.h>
23 #include <linux/kexec.h>
24 #include <linux/of_fdt.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/proc_fs.h>
29 #include <linux/memblock.h>
30 #include <linux/bug.h>
31 #include <linux/compiler.h>
32 #include <linux/sort.h>
33
34 #include <asm/unified.h>
35 #include <asm/cp15.h>
36 #include <asm/cpu.h>
37 #include <asm/cputype.h>
38 #include <asm/elf.h>
39 #include <asm/procinfo.h>
40 #include <asm/sections.h>
41 #include <asm/setup.h>
42 #include <asm/smp_plat.h>
43 #include <asm/mach-types.h>
44 #include <asm/cacheflush.h>
45 #include <asm/cachetype.h>
46 #include <asm/tlbflush.h>
47
48 #include <asm/prom.h>
49 #include <asm/mach/arch.h>
50 #include <asm/mach/irq.h>
51 #include <asm/mach/time.h>
52 #include <asm/system_info.h>
53 #include <asm/system_misc.h>
54 #include <asm/traps.h>
55 #include <asm/unwind.h>
56 #include <asm/memblock.h>
57 #include <asm/virt.h>
58
59 #include "atags.h"
60 #include "tcm.h"
61
62
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FPU emulator type name parsed from the "fpe=" command line option. */
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	/*
	 * Fixed 8-byte copy: the value is truncated (and may be left
	 * without a NUL terminator) if longer than the buffer.
	 */
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
74
/* Implemented elsewhere in arch/arm (mm/ and kernel/). */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

/* Main CPU ID register value; written by early boot code, not here. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine (board) number handed over by the boot loader. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* bitmask describing the cache type; set by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAGS/FDT blob, stashed by early boot code. */
unsigned int __atags_pointer __initdata;

/* Board revision and serial number, reported via /proc/cpuinfo (c_show). */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* capability bits advertised to user space. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-CPU-type operation vectors; copied from the matched
 * proc_info_list entry in setup_processor() when the kernel is built
 * with support for multiple CPU types.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

/* Small per-mode stacks for the re-entrant IRQ/abort/undef handlers. */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* Platform string exposed to user space (printed by c_show). */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/* Run-time endianness probe: ENDIANNESS reads the low byte of the word. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
146
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience names; start/end are filled in by request_standard_resources(). */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC-style parallel-port I/O ranges, claimed only on request. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
199
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned from cpu_architecture(); printed in boot messages and in
 * /proc/cpuinfo.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
219
/*
 * Decode the architecture version from the main CPU ID register.
 * Returns a CPU_ARCH_* value; called once from setup_processor() to
 * populate __cpu_architecture.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7 style ID: no architecture information */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 style ID: bit 23 marks Thumb-capable (v4T) parts */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Old-scheme ID: architecture encoded directly in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
252
/*
 * Return the cached CPU architecture (CPU_ARCH_*).  Must not be called
 * before setup_processor() has initialised __cpu_architecture.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
259
/*
 * Determine whether the instruction cache can alias, i.e. whether its
 * index uses virtual address bits above PAGE_SHIFT.  Returns non-zero
 * for a VIPT-aliasing I-cache.  @arch selects which ID register layout
 * to interpret.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache via CSSELR... */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		/* ...then read its geometry from CCSIDR. */
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing is possible only if one way spans more than a page. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* ARMv6 cache type register: bit 11 flags an aliasing I-cache. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
292
/*
 * Work out what kind of data and instruction caches this CPU has and
 * record the result in the global 'cacheid' bitmask; print a summary.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* Bits [15:14] encode the L1 I-cache policy. */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* ARMv6 cache type register format */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* Pre-v6 CPUs: virtually indexed, virtually tagged caches. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
334
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * printf-style output usable before the console is up.  The formatted
 * message (truncated to 256 bytes) goes to the low-level debug channel
 * when CONFIG_DEBUG_LL is set, and always to the printk log buffer.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
356
357 static void __init feat_v6_fixup(void)
358 {
359         int id = read_cpuid_id();
360
361         if ((id & 0xff0f0000) != 0x41070000)
362                 return;
363
364         /*
365          * HWCAP_TLS is available only on 1136 r1p0 and later,
366          * see also kuser_get_tls_init.
367          */
368         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
369                 elf_hwcap &= ~HWCAP_TLS;
370 }
371
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch into IRQ, then ABT, then UND mode, pointing each mode's
	 * banked SP at its slice of this CPU's 'stacks' entry, and return
	 * to SVC mode.  IRQs and FIQs are kept masked (PSR_I/PSR_F set)
	 * across every mode switch.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
430
/* Logical-to-physical CPU number map, accessed via cpu_logical_map(). */
int __cpu_logical_map[NR_CPUS];

/*
 * Build the logical->physical CPU map so that the booting CPU is
 * always logical CPU 0.  The physical ID is MPIDR affinity level 0 on
 * SMP; UP systems use 0.
 */
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	/* Swap: the slot that would have held 'cpu' maps to physical 0. */
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}
445
/*
 * Identify the boot CPU, bind the per-CPU-type operation vectors,
 * initialise hwcaps, the utsname machine string, cache type and the
 * per-mode exception stacks.  Hangs if the CPU is not in the
 * compiled-in processor table.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Bind the matched CPU's method vectors for multi-CPU kernels. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* Arch/platform name plus run-time endianness suffix ('l'/'b'). */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	/* Kernel built without Thumb support: don't advertise it. */
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
496
497 void __init dump_machine_table(void)
498 {
499         struct machine_desc *p;
500
501         early_print("Available machine support:\n\nID (hex)\tNAME\n");
502         for_each_machine_desc(p)
503                 early_print("%08x\t%s\n", p->nr, p->name);
504
505         early_print("\nPlease check your kernel config and/or bootloader.\n");
506
507         while (true)
508                 /* can't use cpu_relax() here as it may require MMU setup */;
509 }
510
511 int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
512 {
513         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
514
515         if (meminfo.nr_banks >= NR_BANKS) {
516                 printk(KERN_CRIT "NR_BANKS too low, "
517                         "ignoring memory at 0x%08llx\n", (long long)start);
518                 return -EINVAL;
519         }
520
521         /*
522          * Ensure that start/size are aligned to a page boundary.
523          * Size is appropriately rounded down, start is rounded up.
524          */
525         size -= start & ~PAGE_MASK;
526         bank->start = PAGE_ALIGN(start);
527
528 #ifndef CONFIG_LPAE
529         if (bank->start + size < bank->start) {
530                 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
531                         "32-bit physical address space\n", (long long)start);
532                 /*
533                  * To ensure bank->start + bank->size is representable in
534                  * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
535                  * This means we lose a page after masking.
536                  */
537                 size = ULONG_MAX - bank->start;
538         }
539 #endif
540
541         bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
542
543         /*
544          * Check whether this memory region has non-zero size or
545          * invalid node number.
546          */
547         if (bank->size == 0)
548                 return -EINVAL;
549
550         meminfo.nr_banks++;
551         return 0;
552 }
553
554 /*
555  * Pick out the memory size.  We look for mem=size@start,
556  * where start and size are "size[KkMm]"
557  */
558 static int __init early_mem(char *p)
559 {
560         static int usermem __initdata = 0;
561         phys_addr_t size;
562         phys_addr_t start;
563         char *endp;
564
565         /*
566          * If the user specifies memory size, we
567          * blow away any automatically generated
568          * size.
569          */
570         if (usermem == 0) {
571                 usermem = 1;
572                 meminfo.nr_banks = 0;
573         }
574
575         start = PHYS_OFFSET;
576         size  = memparse(p, &endp);
577         if (*endp == '@')
578                 start = memparse(endp + 1, NULL);
579
580         arm_add_memory(start, size);
581
582         return 0;
583 }
584 early_param("mem", early_mem);
585
/*
 * Register a "System RAM" resource for each memblock region and claim
 * the kernel text/data, optional video RAM and the legacy parallel
 * port I/O ranges within the resource tree.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest kernel code/data under the RAM bank containing them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
630
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default console geometry: 80x30 VGA-style text with an 8-pixel font. */
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};
#endif
641
/*
 * Run the board's device registration hook at arch_initcall time.
 */
static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);
660
661 static int __init init_machine_late(void)
662 {
663         if (machine_desc->init_late)
664                 machine_desc->init_late();
665         return 0;
666 }
667 late_initcall(init_machine_late);
668
#ifdef CONFIG_KEXEC
/*
 * Total low memory in bytes, used as the "System RAM" figure when
 * parsing the crashkernel= option.
 * NOTE(review): the shift is performed on an unsigned long pfn count,
 * so this assumes lowmem fits the native word size — true for 32-bit
 * ARM lowmem.
 */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory for the crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* Non-zero: no (or unparseable) crashkernel= option — nothing to do. */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* Publish the reservation so it shows up in /proc/iomem. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
717
718 static int __init meminfo_cmp(const void *_a, const void *_b)
719 {
720         const struct membank *a = _a, *b = _b;
721         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
722         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
723 }
724
/*
 * Report which CPU mode the kernel was entered in (HYP vs SVC) and
 * warn if CPUs booted in inconsistent modes.  Compiles to a no-op
 * without CONFIG_ARM_VIRT_EXT.
 */
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
739
/*
 * setup_arch - ARM architecture-specific boot-time initialisation.
 *
 * Identifies the CPU, selects the machine descriptor (device tree
 * first, ATAGS otherwise), parses the command line, sets up the
 * memory layout and page tables, and wires up machine-specific hooks
 * (restart, IRQ handler, SMP operations, early init).
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAGS/machine number. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be sorted by start address before the MM code uses them. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	/* NOTE(review): on SMP the HYP check is presumably deferred until
	 * the secondary CPUs are up — confirm against the SMP boot code. */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
809
810
811 static int __init topology_init(void)
812 {
813         int cpu;
814
815         for_each_possible_cpu(cpu) {
816                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
817                 cpuinfo->cpu.hotpluggable = 1;
818                 register_cpu(&cpuinfo->cpu, cpu);
819         }
820
821         return 0;
822 }
823 subsys_initcall(topology_init);
824
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used for per-CPU proc entries. */
static int __init proc_cpu_init(void)
{
	/* proc_mkdir() returns NULL on failure. */
	return proc_mkdir("cpu", NULL) ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
837
/*
 * Names for the HWCAP_* bits, in bit order; used by c_show() to print
 * the "Features" line of /proc/cpuinfo.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
860
/*
 * Generate /proc/cpuinfo: one stanza per online CPU, followed by the
 * machine name, board revision and serial number.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		/* The ID register layout differs across CPU generations. */
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
923
924 static void *c_start(struct seq_file *m, loff_t *pos)
925 {
926         return *pos < 1 ? (void *)1 : NULL;
927 }
928
929 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
930 {
931         ++*pos;
932         return NULL;
933 }
934
935 static void c_stop(struct seq_file *m, void *v)
936 {
937 }
938
939 const struct seq_operations cpuinfo_op = {
940         .start  = c_start,
941         .next   = c_next,
942         .stop   = c_stop,
943         .show   = c_show
944 };