arch/sparc64/kernel/smp.c
1 /* smp.c: Sparc64 SMP support.
2  *
3  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4  */
5
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pagemap.h>
11 #include <linux/threads.h>
12 #include <linux/smp.h>
13 #include <linux/smp_lock.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/delay.h>
17 #include <linux/init.h>
18 #include <linux/spinlock.h>
19 #include <linux/fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/cache.h>
22 #include <linux/jiffies.h>
23 #include <linux/profile.h>
24 #include <linux/bootmem.h>
25
26 #include <asm/head.h>
27 #include <asm/ptrace.h>
28 #include <asm/atomic.h>
29 #include <asm/tlbflush.h>
30 #include <asm/mmu_context.h>
31 #include <asm/cpudata.h>
32
33 #include <asm/irq.h>
34 #include <asm/page.h>
35 #include <asm/pgtable.h>
36 #include <asm/oplib.h>
37 #include <asm/uaccess.h>
38 #include <asm/timer.h>
39 #include <asm/starfire.h>
40 #include <asm/tlb.h>
41 #include <asm/sections.h>
42
43 extern void calibrate_delay(void);
44
45 /* Please don't make this stuff initdata!!!  --DaveM */
46 static unsigned char boot_cpu_id;
47
48 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
49 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
50 static cpumask_t smp_commenced_mask;
51 static cpumask_t cpu_callout_map;
52
53 void smp_info(struct seq_file *m)
54 {
55         int i;
56         
57         seq_printf(m, "State:\n");
58         for (i = 0; i < NR_CPUS; i++) {
59                 if (cpu_online(i))
60                         seq_printf(m,
61                                    "CPU%d:\t\tonline\n", i);
62         }
63 }
64
65 void smp_bogo(struct seq_file *m)
66 {
67         int i;
68         
69         for (i = 0; i < NR_CPUS; i++)
70                 if (cpu_online(i))
71                         seq_printf(m,
72                                    "Cpu%dBogo\t: %lu.%02lu\n"
73                                    "Cpu%dClkTck\t: %016lx\n",
74                                    i, cpu_data(i).udelay_val / (500000/HZ),
75                                    (cpu_data(i).udelay_val / (5000/HZ)) % 100,
76                                    i, cpu_data(i).clock_tick);
77 }
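/* The arithmetic above prints loops_per_jiffy (udelay_val) as a BogoMIPS
 * value with two decimal places.  Illustrative numbers, assuming HZ=100:
 * udelay_val = 997376 gives 997376 / 5000 = 199 for the integer part and
 * (997376 / 50) % 100 = 47 for the fraction, i.e. "Cpu0Bogo : 199.47".
 */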
78
79 void __init smp_store_cpu_info(int id)
80 {
81         int cpu_node, def;
82
83         /* multiplier and counter set by
84            smp_setup_percpu_timer()  */
85         cpu_data(id).udelay_val                 = loops_per_jiffy;
86
87         cpu_find_by_mid(id, &cpu_node);
88         cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
89                                                      "clock-frequency", 0);
90
91         def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
92         cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
93                                                       def);
94
95         def = 32;
96         cpu_data(id).dcache_line_size =
97                 prom_getintdefault(cpu_node, "dcache-line-size", def);
98
99         def = 16 * 1024;
100         cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
101                                                       def);
102
103         def = 32;
104         cpu_data(id).icache_line_size =
105                 prom_getintdefault(cpu_node, "icache-line-size", def);
106
107         def = ((tlb_type == hypervisor) ?
108                (3 * 1024 * 1024) :
109                (4 * 1024 * 1024));
110         cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
111                                                       def);
112
113         def = 64;
114         cpu_data(id).ecache_line_size =
115                 prom_getintdefault(cpu_node, "ecache-line-size", def);
116
117         printk("CPU[%d]: Caches "
118                "D[sz(%d):line_sz(%d)] "
119                "I[sz(%d):line_sz(%d)] "
120                "E[sz(%d):line_sz(%d)]\n",
121                id,
122                cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
123                cpu_data(id).icache_size, cpu_data(id).icache_line_size,
124                cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
125 }
126
127 static void smp_setup_percpu_timer(void);
128
129 static volatile unsigned long callin_flag = 0;
130
131 void __init smp_callin(void)
132 {
133         int cpuid = hard_smp_processor_id();
134
135         __local_per_cpu_offset = __per_cpu_offset(cpuid);
136
137         if (tlb_type == hypervisor)
138                 sun4v_ktsb_register();
139
140         __flush_tlb_all();
141
142         smp_setup_percpu_timer();
143
144         if (cheetah_pcache_forced_on)
145                 cheetah_enable_pcache();
146
147         local_irq_enable();
148
149         calibrate_delay();
150         smp_store_cpu_info(cpuid);
151         callin_flag = 1;
152         __asm__ __volatile__("membar #Sync\n\t"
153                              "flush  %%g6" : : : "memory");
154
155         /* Clear this or we will die instantly when we
156          * schedule back to this idler...
157          */
158         current_thread_info()->new_child = 0;
159
160         /* Attach to the address space of init_task. */
161         atomic_inc(&init_mm.mm_count);
162         current->active_mm = &init_mm;
163
164         while (!cpu_isset(cpuid, smp_commenced_mask))
165                 rmb();
166
167         cpu_set(cpuid, cpu_online_map);
168
169         /* idle thread is expected to have preempt disabled */
170         preempt_disable();
171 }
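/* Boot handshake summary: smp_boot_one_cpu() below polls callin_flag to
 * learn that the new cpu reached this point, and the new cpu in turn spins
 * on smp_commenced_mask until __cpu_up() gives it the go-ahead to mark
 * itself online in cpu_online_map.
 */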
172
173 void cpu_panic(void)
174 {
175         printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
176         panic("SMP bolixed\n");
177 }
178
179 static unsigned long current_tick_offset __read_mostly;
180
181 /* This tick register synchronization scheme is taken entirely from
182  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
183  *
184  * The only change I've made is to rework it so that the master
185  * initiates the synchronization instead of the slave. -DaveM
186  */
187
188 #define MASTER  0
189 #define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))
190
191 #define NUM_ROUNDS      64      /* magic value */
192 #define NUM_ITERS       5       /* likewise */
193
194 static DEFINE_SPINLOCK(itc_sync_lock);
195 static unsigned long go[SLAVE + 1];
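/* go[MASTER] and go[SLAVE] are the only shared state of the handshake
 * below.  SLAVE indexes one cache line's worth of longs past MASTER so
 * the two flags never share a cache line.
 */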
196
197 #define DEBUG_TICK_SYNC 0
198
199 static inline long get_delta (long *rt, long *master)
200 {
201         unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
202         unsigned long tcenter, t0, t1, tm;
203         unsigned long i;
204
205         for (i = 0; i < NUM_ITERS; i++) {
206                 t0 = tick_ops->get_tick();
207                 go[MASTER] = 1;
208                 membar_storeload();
209                 while (!(tm = go[SLAVE]))
210                         rmb();
211                 go[SLAVE] = 0;
212                 wmb();
213                 t1 = tick_ops->get_tick();
214
215                 if (t1 - t0 < best_t1 - best_t0)
216                         best_t0 = t0, best_t1 = t1, best_tm = tm;
217         }
218
219         *rt = best_t1 - best_t0;
220         *master = best_tm - best_t0;
221
222         /* average best_t0 and best_t1 without overflow: */
223         tcenter = (best_t0/2 + best_t1/2);
224         if (best_t0 % 2 + best_t1 % 2 == 2)
225                 tcenter++;
226         return tcenter - best_tm;
227 }
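/* Illustrative numbers: if the slave reads t0 = 1000 and t1 = 1040 around a
 * round trip in which the master reported tm = 1520, the midpoint of the
 * slave's interval is 1020 and get_delta() returns 1020 - 1520 = -500, i.e.
 * the slave's tick is about 500 cycles behind the master; the caller then
 * applies adj = -delta = +500 via tick_ops->add_tick().
 */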
228
229 void smp_synchronize_tick_client(void)
230 {
231         long i, delta, adj, adjust_latency = 0, done = 0;
232         unsigned long flags, rt, master_time_stamp, bound;
233 #if DEBUG_TICK_SYNC
234         struct {
235                 long rt;        /* roundtrip time */
236                 long master;    /* master's timestamp */
237                 long diff;      /* difference between midpoint and master's timestamp */
238                 long lat;       /* estimate of itc adjustment latency */
239         } t[NUM_ROUNDS];
240 #endif
241
242         go[MASTER] = 1;
243
244         while (go[MASTER])
245                 rmb();
246
247         local_irq_save(flags);
248         {
249                 for (i = 0; i < NUM_ROUNDS; i++) {
250                         delta = get_delta(&rt, &master_time_stamp);
251                         if (delta == 0) {
252                                 done = 1;       /* let's lock on to this... */
253                                 bound = rt;
254                         }
255
256                         if (!done) {
257                                 if (i > 0) {
258                                         adjust_latency += -delta;
259                                         adj = -delta + adjust_latency/4;
260                                 } else
261                                         adj = -delta;
262
263                                 tick_ops->add_tick(adj, current_tick_offset);
264                         }
265 #if DEBUG_TICK_SYNC
266                         t[i].rt = rt;
267                         t[i].master = master_time_stamp;
268                         t[i].diff = delta;
269                         t[i].lat = adjust_latency/4;
270 #endif
271                 }
272         }
273         local_irq_restore(flags);
274
275 #if DEBUG_TICK_SYNC
276         for (i = 0; i < NUM_ROUNDS; i++)
277                 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
278                        t[i].rt, t[i].master, t[i].diff, t[i].lat);
279 #endif
280
281         printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
282                " maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
283 }
284
285 static void smp_start_sync_tick_client(int cpu);
286
287 static void smp_synchronize_one_tick(int cpu)
288 {
289         unsigned long flags, i;
290
291         go[MASTER] = 0;
292
293         smp_start_sync_tick_client(cpu);
294
295         /* wait for client to be ready */
296         while (!go[MASTER])
297                 rmb();
298
299         /* now let the client proceed into his loop */
300         go[MASTER] = 0;
301         membar_storeload();
302
303         spin_lock_irqsave(&itc_sync_lock, flags);
304         {
305                 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
306                         while (!go[MASTER])
307                                 rmb();
308                         go[MASTER] = 0;
309                         wmb();
310                         go[SLAVE] = tick_ops->get_tick();
311                         membar_storeload();
312                 }
313         }
314         spin_unlock_irqrestore(&itc_sync_lock, flags);
315 }
316
317 extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
318
319 extern unsigned long sparc64_cpu_startup;
320
321 /* The OBP cpu startup callback truncates the 3rd arg cookie to
322  * 32 bits (I think), so to be safe we have it read the pointer
323  * contained here; that way we work on >4GB machines. -DaveM
324  */
325 static struct thread_info *cpu_new_thread = NULL;
326
327 static int __devinit smp_boot_one_cpu(unsigned int cpu)
328 {
329         unsigned long entry =
330                 (unsigned long)(&sparc64_cpu_startup);
331         unsigned long cookie =
332                 (unsigned long)(&cpu_new_thread);
333         struct task_struct *p;
334         int timeout, ret;
335
336         p = fork_idle(cpu);
337         callin_flag = 0;
338         cpu_new_thread = task_thread_info(p);
339         cpu_set(cpu, cpu_callout_map);
340
341         if (tlb_type == hypervisor) {
342                 /* Alloc the mondo queues, cpu will load them.  */
343                 sun4v_init_mondo_queues(0, cpu, 1, 0);
344
345                 prom_startcpu_cpuid(cpu, entry, cookie);
346         } else {
347                 int cpu_node;
348
349                 cpu_find_by_mid(cpu, &cpu_node);
350                 prom_startcpu(cpu_node, entry, cookie);
351         }
352
353         for (timeout = 0; timeout < 5000000; timeout++) {
354                 if (callin_flag)
355                         break;
356                 udelay(100);
357         }
358
359         if (callin_flag) {
360                 ret = 0;
361         } else {
362                 printk("Processor %d is stuck.\n", cpu);
363                 cpu_clear(cpu, cpu_callout_map);
364                 ret = -ENODEV;
365         }
366         cpu_new_thread = NULL;
367
368         return ret;
369 }
370
371 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
372 {
373         u64 result, target;
374         int stuck, tmp;
375
376         if (this_is_starfire) {
377                 /* map to real upaid */
378                 cpu = (((cpu & 0x3c) << 1) |
379                         ((cpu & 0x40) >> 4) |
380                         (cpu & 0x3));
381         }
382
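        /* Dispatch address layout, as far as I can tell from the asm below:
         * the target's interrupt id is placed at bit 14 and up, 0x70 selects
         * the dispatch command register, and the three outgoing data words
         * are written first at offsets 0x40, 0x50 and 0x60.
         */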
383         target = (cpu << 14) | 0x70;
384 again:
385         /* Ok, this is the real Spitfire Errata #54.
386          * One must read back from a UDB internal register
387          * after writes to the UDB interrupt dispatch, but
388          * before the membar Sync for that write.
389          * So we use the high UDB control register (ASI 0x7f,
390          * ADDR 0x20) for the dummy read. -DaveM
391          */
392         tmp = 0x40;
393         __asm__ __volatile__(
394         "wrpr   %1, %2, %%pstate\n\t"
395         "stxa   %4, [%0] %3\n\t"
396         "stxa   %5, [%0+%8] %3\n\t"
397         "add    %0, %8, %0\n\t"
398         "stxa   %6, [%0+%8] %3\n\t"
399         "membar #Sync\n\t"
400         "stxa   %%g0, [%7] %3\n\t"
401         "membar #Sync\n\t"
402         "mov    0x20, %%g1\n\t"
403         "ldxa   [%%g1] 0x7f, %%g0\n\t"
404         "membar #Sync"
405         : "=r" (tmp)
406         : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
407           "r" (data0), "r" (data1), "r" (data2), "r" (target),
408           "r" (0x10), "0" (tmp)
409         : "g1");
410
411         /* NOTE: PSTATE_IE is still clear. */
412         stuck = 100000;
413         do {
414                 __asm__ __volatile__("ldxa [%%g0] %1, %0"
415                         : "=r" (result)
416                         : "i" (ASI_INTR_DISPATCH_STAT));
417                 if (result == 0) {
418                         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
419                                              : : "r" (pstate));
420                         return;
421                 }
422                 stuck -= 1;
423                 if (stuck == 0)
424                         break;
425         } while (result & 0x1);
426         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
427                              : : "r" (pstate));
428         if (stuck == 0) {
429                 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
430                        smp_processor_id(), result);
431         } else {
432                 udelay(2);
433                 goto again;
434         }
435 }
436
437 static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
438 {
439         u64 pstate;
440         int i;
441
442         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
443         for_each_cpu_mask(i, mask)
444                 spitfire_xcall_helper(data0, data1, data2, pstate, i);
445 }
446
447 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
448  * packet, but we have no use for that.  However we do take advantage of
449  * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
450  */
451 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
452 {
453         u64 pstate, ver;
454         int nack_busy_id, is_jbus;
455
456         if (cpus_empty(mask))
457                 return;
458
459         /* Unfortunately, someone at Sun had the brilliant idea to make the
460          * busy/nack fields hard-coded by ITID number for this Ultra-III
461          * derivative processor.
462          */
463         __asm__ ("rdpr %%ver, %0" : "=r" (ver));
464         is_jbus = ((ver >> 32) == __JALAPENO_ID ||
465                    (ver >> 32) == __SERRANO_ID);
466
467         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
468
469 retry:
470         __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
471                              : : "r" (pstate), "i" (PSTATE_IE));
472
473         /* Setup the dispatch data registers. */
474         __asm__ __volatile__("stxa      %0, [%3] %6\n\t"
475                              "stxa      %1, [%4] %6\n\t"
476                              "stxa      %2, [%5] %6\n\t"
477                              "membar    #Sync\n\t"
478                              : /* no outputs */
479                              : "r" (data0), "r" (data1), "r" (data2),
480                                "r" (0x40), "r" (0x50), "r" (0x60),
481                                "i" (ASI_INTR_W));
482
483         nack_busy_id = 0;
484         {
485                 int i;
486
487                 for_each_cpu_mask(i, mask) {
488                         u64 target = (i << 14) | 0x70;
489
490                         if (!is_jbus)
491                                 target |= (nack_busy_id << 24);
492                         __asm__ __volatile__(
493                                 "stxa   %%g0, [%0] %1\n\t"
494                                 "membar #Sync\n\t"
495                                 : /* no outputs */
496                                 : "r" (target), "i" (ASI_INTR_W));
497                         nack_busy_id++;
498                 }
499         }
500
501         /* Now, poll for completion. */
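        /* Each dispatched target occupies a two-bit field in the dispatch
         * status register: bit 0 of the pair is "busy" (hence the
         * 0x5555555555555555 mask) and bit 1 is "nack" (hence the
         * 0x2UL << shift checks in the retry path below).
         */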
502         {
503                 u64 dispatch_stat;
504                 long stuck;
505
506                 stuck = 100000 * nack_busy_id;
507                 do {
508                         __asm__ __volatile__("ldxa      [%%g0] %1, %0"
509                                              : "=r" (dispatch_stat)
510                                              : "i" (ASI_INTR_DISPATCH_STAT));
511                         if (dispatch_stat == 0UL) {
512                                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
513                                                      : : "r" (pstate));
514                                 return;
515                         }
516                         if (!--stuck)
517                                 break;
518                 } while (dispatch_stat & 0x5555555555555555UL);
519
520                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
521                                      : : "r" (pstate));
522
523                 if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
524                         /* Busy bits will not clear, continue instead
525                          * of freezing up on this cpu.
526                          */
527                         printk("CPU[%d]: mondo stuckage result[%016lx]\n",
528                                smp_processor_id(), dispatch_stat);
529                 } else {
530                         int i, this_busy_nack = 0;
531
532                         /* Delay some random time with interrupts enabled
533                          * to prevent deadlock.
534                          */
535                         udelay(2 * nack_busy_id);
536
537                         /* Clear out the mask bits for cpus which did not
538                          * NACK us.
539                          */
540                         for_each_cpu_mask(i, mask) {
541                                 u64 check_mask;
542
543                                 if (is_jbus)
544                                         check_mask = (0x2UL << (2*i));
545                                 else
546                                         check_mask = (0x2UL <<
547                                                       this_busy_nack);
548                                 if ((dispatch_stat & check_mask) == 0)
549                                         cpu_clear(i, mask);
550                                 this_busy_nack += 2;
551                         }
552
553                         goto retry;
554                 }
555         }
556 }
557
558 /* Multi-cpu list version.  */
559 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
560 {
561         struct trap_per_cpu *tb;
562         u16 *cpu_list;
563         u64 *mondo;
564         cpumask_t error_mask;
565         unsigned long flags, status;
566         int cnt, retries, this_cpu, i;
567
568         /* We have to do this whole thing with interrupts fully disabled.
569          * Otherwise if we send an xcall from interrupt context it will
570          * corrupt both our mondo block and cpu list state.
571          *
572          * One consequence of this is that we cannot use timeout mechanisms
573          * that depend upon interrupts being delivered locally.  So, for
574          * example, we cannot sample jiffies and expect it to advance.
575          *
576          * Fortunately, udelay() uses %stick/%tick so we can use that.
577          */
578         local_irq_save(flags);
579
580         this_cpu = smp_processor_id();
581         tb = &trap_block[this_cpu];
582
583         mondo = __va(tb->cpu_mondo_block_pa);
584         mondo[0] = data0;
585         mondo[1] = data1;
586         mondo[2] = data2;
587         wmb();
588
589         cpu_list = __va(tb->cpu_list_pa);
590
591         /* Setup the initial cpu list.  */
592         cnt = 0;
593         for_each_cpu_mask(i, mask)
594                 cpu_list[cnt++] = i;
595
596         cpus_clear(error_mask);
597         retries = 0;
598         do {
599                 int forward_progress;
600
601                 status = sun4v_cpu_mondo_send(cnt,
602                                               tb->cpu_list_pa,
603                                               tb->cpu_mondo_block_pa);
604
605                 /* HV_EOK means all cpus received the xcall, we're done.  */
606                 if (likely(status == HV_EOK))
607                         break;
608
609                 /* First, clear out all the cpus in the mask that were
610                  * successfully sent to.  The hypervisor indicates this
611                  * by setting the cpu list entry of such cpus to 0xffff.
612                  */
613                 forward_progress = 0;
614                 for (i = 0; i < cnt; i++) {
615                         if (cpu_list[i] == 0xffff) {
616                                 cpu_clear(i, mask);
617                                 forward_progress = 1;
618                         }
619                 }
620
621                 /* If we get a HV_ECPUERROR, then one or more of the cpus
622                  * in the list are in error state.  Use the cpu_state()
623                  * hypervisor call to find out which cpus are in error state.
624                  */
625                 if (unlikely(status == HV_ECPUERROR)) {
626                         for (i = 0; i < cnt; i++) {
627                                 long err;
628                                 u16 cpu;
629
630                                 cpu = cpu_list[i];
631                                 if (cpu == 0xffff)
632                                         continue;
633
634                                 err = sun4v_cpu_state(cpu);
635                                 if (err >= 0 &&
636                                     err == HV_CPU_STATE_ERROR) {
637                                         cpu_clear(cpu, mask);
638                                         cpu_set(cpu, error_mask);
639                                 }
640                         }
641                 } else if (unlikely(status != HV_EWOULDBLOCK))
642                         goto fatal_mondo_error;
643
644                 /* Rebuild the cpu_list[] array and try again.  */
645                 cnt = 0;
646                 for_each_cpu_mask(i, mask)
647                         cpu_list[cnt++] = i;
648
649                 if (unlikely(!forward_progress)) {
650                         if (unlikely(++retries > 10000))
651                                 goto fatal_mondo_timeout;
652
653                         /* Delay a little bit to let other cpus catch up
654                          * on their cpu mondo queue work.
655                          */
656                         udelay(2 * cnt);
657                 }
658         } while (1);
659
660         local_irq_restore(flags);
661
662         if (unlikely(!cpus_empty(error_mask)))
663                 goto fatal_mondo_cpu_error;
664
665         return;
666
667 fatal_mondo_cpu_error:
668         printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
669                "were in error state\n",
670                this_cpu);
671         printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
672         for_each_cpu_mask(i, error_mask)
673                 printk("%d ", i);
674         printk("]\n");
675         return;
676
677 fatal_mondo_timeout:
678         local_irq_restore(flags);
679         printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
680                "progress after %d retries.\n",
681                this_cpu, retries);
682         goto dump_cpu_list_and_out;
683
684 fatal_mondo_error:
685         local_irq_restore(flags);
686         printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
687                this_cpu, status);
688         printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
689                "mondo_block_pa(%lx)\n",
690                this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
691
692 dump_cpu_list_and_out:
693         printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
694         for (i = 0; i < cnt; i++)
695                 printk("%u ", cpu_list[i]);
696         printk("]\n");
697 }
698
699 /* Send cross call to all processors mentioned in MASK
700  * except self.
701  */
702 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
703 {
704         u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
705         int this_cpu = get_cpu();
706
707         cpus_and(mask, mask, cpu_online_map);
708         cpu_clear(this_cpu, mask);
709
710         if (tlb_type == spitfire)
711                 spitfire_xcall_deliver(data0, data1, data2, mask);
712         else if (tlb_type == cheetah || tlb_type == cheetah_plus)
713                 cheetah_xcall_deliver(data0, data1, data2, mask);
714         else
715                 hypervisor_xcall_deliver(data0, data1, data2, mask);
716         /* NOTE: Caller runs local copy on master. */
717
718         put_cpu();
719 }
720
721 extern unsigned long xcall_sync_tick;
722
723 static void smp_start_sync_tick_client(int cpu)
724 {
725         cpumask_t mask = cpumask_of_cpu(cpu);
726
727         smp_cross_call_masked(&xcall_sync_tick,
728                               0, 0, 0, mask);
729 }
730
731 /* Send cross call to all processors except self. */
732 #define smp_cross_call(func, ctx, data1, data2) \
733         smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
734
735 struct call_data_struct {
736         void (*func) (void *info);
737         void *info;
738         atomic_t finished;
739         int wait;
740 };
741
742 static DEFINE_SPINLOCK(call_lock);
743 static struct call_data_struct *call_data;
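/* Cross-call protocol used by smp_call_function_mask(): the initiator fills
 * in a call_data_struct on its stack, publishes it through call_data under
 * call_lock, sends xcall_call_function to the targets, and waits for
 * "finished" to reach the number of target cpus.  Each target runs
 * smp_call_function_client() from its softint handler.
 */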
744
745 extern unsigned long xcall_call_function;
746
747 /*
748  * You must not call this function with disabled interrupts or from a
749  * hardware interrupt handler or from a bottom half handler.
750  */
751 static int smp_call_function_mask(void (*func)(void *info), void *info,
752                                   int nonatomic, int wait, cpumask_t mask)
753 {
754         struct call_data_struct data;
755         int cpus = cpus_weight(mask) - 1;
756         long timeout;
757
758         if (!cpus)
759                 return 0;
760
761         /* Can deadlock when called with interrupts disabled */
762         WARN_ON(irqs_disabled());
763
764         data.func = func;
765         data.info = info;
766         atomic_set(&data.finished, 0);
767         data.wait = wait;
768
769         spin_lock(&call_lock);
770
771         call_data = &data;
772
773         smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
774
775         /* 
776          * Wait for other cpus to complete function or at
777          * least snap the call data.
778          */
779         timeout = 1000000;
780         while (atomic_read(&data.finished) != cpus) {
781                 if (--timeout <= 0)
782                         goto out_timeout;
783                 barrier();
784                 udelay(1);
785         }
786
787         spin_unlock(&call_lock);
788
789         return 0;
790
791 out_timeout:
792         spin_unlock(&call_lock);
793         printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
794                cpus, atomic_read(&data.finished));
795         return 0;
796 }
797
798 int smp_call_function(void (*func)(void *info), void *info,
799                       int nonatomic, int wait)
800 {
801         return smp_call_function_mask(func, info, nonatomic, wait,
802                                       cpu_online_map);
803 }
804
805 void smp_call_function_client(int irq, struct pt_regs *regs)
806 {
807         void (*func) (void *info) = call_data->func;
808         void *info = call_data->info;
809
810         clear_softint(1 << irq);
811         if (call_data->wait) {
812                 /* let initiator proceed only after completion */
813                 func(info);
814                 atomic_inc(&call_data->finished);
815         } else {
816                 /* let initiator proceed after getting data */
817                 atomic_inc(&call_data->finished);
818                 func(info);
819         }
820 }
821
822 static void tsb_sync(void *info)
823 {
824         struct mm_struct *mm = info;
825
826         if (current->active_mm == mm)
827                 tsb_context_switch(mm);
828 }
829
830 void smp_tsb_sync(struct mm_struct *mm)
831 {
832         smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
833 }
834
835 extern unsigned long xcall_flush_tlb_mm;
836 extern unsigned long xcall_flush_tlb_pending;
837 extern unsigned long xcall_flush_tlb_kernel_range;
838 extern unsigned long xcall_report_regs;
839 extern unsigned long xcall_receive_signal;
840
841 #ifdef DCACHE_ALIASING_POSSIBLE
842 extern unsigned long xcall_flush_dcache_page_cheetah;
843 #endif
844 extern unsigned long xcall_flush_dcache_page_spitfire;
845
846 #ifdef CONFIG_DEBUG_DCFLUSH
847 extern atomic_t dcpage_flushes;
848 extern atomic_t dcpage_flushes_xcall;
849 #endif
850
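/* Presumably the second argument of __flush_dcache_page() requests an
 * I-cache flush as well: only Spitfire, and only for pages with a mapping,
 * needs that, matching the non-aliasing fallback below which calls
 * __flush_icache_page() under the same condition.
 */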
851 static __inline__ void __local_flush_dcache_page(struct page *page)
852 {
853 #ifdef DCACHE_ALIASING_POSSIBLE
854         __flush_dcache_page(page_address(page),
855                             ((tlb_type == spitfire) &&
856                              page_mapping(page) != NULL));
857 #else
858         if (page_mapping(page) != NULL &&
859             tlb_type == spitfire)
860                 __flush_icache_page(__pa(page_address(page)));
861 #endif
862 }
863
864 void smp_flush_dcache_page_impl(struct page *page, int cpu)
865 {
866         cpumask_t mask = cpumask_of_cpu(cpu);
867         int this_cpu;
868
869         if (tlb_type == hypervisor)
870                 return;
871
872 #ifdef CONFIG_DEBUG_DCFLUSH
873         atomic_inc(&dcpage_flushes);
874 #endif
875
876         this_cpu = get_cpu();
877
878         if (cpu == this_cpu) {
879                 __local_flush_dcache_page(page);
880         } else if (cpu_online(cpu)) {
881                 void *pg_addr = page_address(page);
882                 u64 data0;
883
884                 if (tlb_type == spitfire) {
885                         data0 =
886                                 ((u64)&xcall_flush_dcache_page_spitfire);
887                         if (page_mapping(page) != NULL)
888                                 data0 |= ((u64)1 << 32);
889                         spitfire_xcall_deliver(data0,
890                                                __pa(pg_addr),
891                                                (u64) pg_addr,
892                                                mask);
893                 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
894 #ifdef DCACHE_ALIASING_POSSIBLE
895                         data0 =
896                                 ((u64)&xcall_flush_dcache_page_cheetah);
897                         cheetah_xcall_deliver(data0,
898                                               __pa(pg_addr),
899                                               0, mask);
900 #endif
901                 }
902 #ifdef CONFIG_DEBUG_DCFLUSH
903                 atomic_inc(&dcpage_flushes_xcall);
904 #endif
905         }
906
907         put_cpu();
908 }
909
910 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
911 {
912         void *pg_addr = page_address(page);
913         cpumask_t mask = cpu_online_map;
914         u64 data0;
915         int this_cpu;
916
917         if (tlb_type == hypervisor)
918                 return;
919
920         this_cpu = get_cpu();
921
922         cpu_clear(this_cpu, mask);
923
924 #ifdef CONFIG_DEBUG_DCFLUSH
925         atomic_inc(&dcpage_flushes);
926 #endif
927         if (cpus_empty(mask))
928                 goto flush_self;
929         if (tlb_type == spitfire) {
930                 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
931                 if (page_mapping(page) != NULL)
932                         data0 |= ((u64)1 << 32);
933                 spitfire_xcall_deliver(data0,
934                                        __pa(pg_addr),
935                                        (u64) pg_addr,
936                                        mask);
937         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
938 #ifdef DCACHE_ALIASING_POSSIBLE
939                 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
940                 cheetah_xcall_deliver(data0,
941                                       __pa(pg_addr),
942                                       0, mask);
943 #endif
944         }
945 #ifdef CONFIG_DEBUG_DCFLUSH
946         atomic_inc(&dcpage_flushes_xcall);
947 #endif
948  flush_self:
949         __local_flush_dcache_page(page);
950
951         put_cpu();
952 }
953
954 static void __smp_receive_signal_mask(cpumask_t mask)
955 {
956         smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
957 }
958
959 void smp_receive_signal(int cpu)
960 {
961         cpumask_t mask = cpumask_of_cpu(cpu);
962
963         if (cpu_online(cpu))
964                 __smp_receive_signal_mask(mask);
965 }
966
967 void smp_receive_signal_client(int irq, struct pt_regs *regs)
968 {
969         struct mm_struct *mm;
970
971         clear_softint(1 << irq);
972
973         /* See if we need to allocate a new TLB context because
974          * the version of the one we are using is now out of date.
975          */
976         mm = current->active_mm;
977         if (likely(mm)) {
978                 unsigned long flags;
979
980                 spin_lock_irqsave(&mm->context.lock, flags);
981
982                 if (unlikely(!CTX_VALID(mm->context)))
983                         get_new_mmu_context(mm);
984
985                 load_secondary_context(mm);
986                 __flush_tlb_mm(CTX_HWBITS(mm->context),
987                                SECONDARY_CONTEXT);
988
989                 spin_unlock_irqrestore(&mm->context.lock, flags);
990         }
991 }
992
993 void smp_new_mmu_context_version(void)
994 {
995         __smp_receive_signal_mask(cpu_online_map);
996 }
997
998 void smp_report_regs(void)
999 {
1000         smp_cross_call(&xcall_report_regs, 0, 0, 0);
1001 }
1002
1003 /* We know that the window frames of the user have been flushed
1004  * to the stack before we get here because all callers of us
1005  * are flush_tlb_*() routines, and these run after flush_cache_*()
1006  * which performs the flushw.
1007  *
1008  * The SMP TLB coherency scheme we use works as follows:
1009  *
1010  * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1011  *    space has (potentially) executed on, this is the heuristic
1012  *    we use to avoid doing cross calls.
1013  *
1014  *    Also, for flushing from kswapd and also for clones, we
1015  *    use cpu_vm_mask as the list of cpus to make run the TLB.
1016  *
1017  * 2) TLB context numbers are shared globally across all processors
1018  *    in the system, this allows us to play several games to avoid
1019  *    cross calls.
1020  *
1021  *    One invariant is that when a cpu switches to a process, and
1022  *    that process's tsk->active_mm->cpu_vm_mask does not have the
1023  *    current cpu's bit set, that tlb context is flushed locally.
1024  *
1025  *    If the address space is non-shared (i.e. mm->mm_users == 1) we avoid
1026  *    cross calls when we want to flush the currently running process's
1027  *    tlb state.  This is done by clearing all cpu bits except the current
1028  *    processor's in current->active_mm->cpu_vm_mask and performing the
1029  *    flush locally only.  This will force any subsequent cpus which run
1030  *    this task to flush the context from the local tlb if the process
1031  *    migrates to another cpu (again).
1032  *
1033  * 3) For shared address spaces (threads) and swapping we bite the
1034  *    bullet for most cases and perform the cross call (but only to
1035  *    the cpus listed in cpu_vm_mask).
1036  *
1037  *    The performance gain from "optimizing" away the cross call for threads is
1038  *    questionable (in theory the big win for threads is the massive sharing of
1039  *    address space state across processors).
1040  */
1041
1042 /* This currently is only used by the hugetlb arch pre-fault
1043  * hook on UltraSPARC-III+ and later when changing the pagesize
1044  * bits of the context register for an address space.
1045  */
1046 void smp_flush_tlb_mm(struct mm_struct *mm)
1047 {
1048         u32 ctx = CTX_HWBITS(mm->context);
1049         int cpu = get_cpu();
1050
1051         if (atomic_read(&mm->mm_users) == 1) {
1052                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1053                 goto local_flush_and_out;
1054         }
1055
1056         smp_cross_call_masked(&xcall_flush_tlb_mm,
1057                               ctx, 0, 0,
1058                               mm->cpu_vm_mask);
1059
1060 local_flush_and_out:
1061         __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1062
1063         put_cpu();
1064 }
1065
1066 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1067 {
1068         u32 ctx = CTX_HWBITS(mm->context);
1069         int cpu = get_cpu();
1070
1071         if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
1072                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1073         else
1074                 smp_cross_call_masked(&xcall_flush_tlb_pending,
1075                                       ctx, nr, (unsigned long) vaddrs,
1076                                       mm->cpu_vm_mask);
1077
1078         __flush_tlb_pending(ctx, nr, vaddrs);
1079
1080         put_cpu();
1081 }
1082
1083 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1084 {
1085         start &= PAGE_MASK;
1086         end    = PAGE_ALIGN(end);
1087         if (start != end) {
1088                 smp_cross_call(&xcall_flush_tlb_kernel_range,
1089                                0, start, end);
1090
1091                 __flush_tlb_kernel_range(start, end);
1092         }
1093 }
1094
1095 /* CPU capture. */
1096 /* #define CAPTURE_DEBUG */
1097 extern unsigned long xcall_capture;
1098
1099 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1100 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1101 static unsigned long penguins_are_doing_time;
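/* Capture protocol: the first caller of smp_capture() sets
 * penguins_are_doing_time and cross-calls xcall_capture; every other cpu
 * lands in smp_penguin_jailcell(), calls prom_world(1), bumps
 * smp_capture_registry and spins there until smp_release() clears the
 * flag.  smp_capture_depth lets capture/release pairs nest.
 */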
1102
1103 void smp_capture(void)
1104 {
1105         int result = atomic_add_ret(1, &smp_capture_depth);
1106
1107         if (result == 1) {
1108                 int ncpus = num_online_cpus();
1109
1110 #ifdef CAPTURE_DEBUG
1111                 printk("CPU[%d]: Sending penguins to jail...",
1112                        smp_processor_id());
1113 #endif
1114                 penguins_are_doing_time = 1;
1115                 membar_storestore_loadstore();
1116                 atomic_inc(&smp_capture_registry);
1117                 smp_cross_call(&xcall_capture, 0, 0, 0);
1118                 while (atomic_read(&smp_capture_registry) != ncpus)
1119                         rmb();
1120 #ifdef CAPTURE_DEBUG
1121                 printk("done\n");
1122 #endif
1123         }
1124 }
1125
1126 void smp_release(void)
1127 {
1128         if (atomic_dec_and_test(&smp_capture_depth)) {
1129 #ifdef CAPTURE_DEBUG
1130                 printk("CPU[%d]: Giving pardon to "
1131                        "imprisoned penguins\n",
1132                        smp_processor_id());
1133 #endif
1134                 penguins_are_doing_time = 0;
1135                 membar_storeload_storestore();
1136                 atomic_dec(&smp_capture_registry);
1137         }
1138 }
1139
1140 /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
1141  * can service tlb flush xcalls...
1142  */
1143 extern void prom_world(int);
1144
1145 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1146 {
1147         clear_softint(1 << irq);
1148
1149         preempt_disable();
1150
1151         __asm__ __volatile__("flushw");
1152         prom_world(1);
1153         atomic_inc(&smp_capture_registry);
1154         membar_storeload_storestore();
1155         while (penguins_are_doing_time)
1156                 rmb();
1157         atomic_dec(&smp_capture_registry);
1158         prom_world(0);
1159
1160         preempt_enable();
1161 }
1162
1163 #define prof_multiplier(__cpu)          cpu_data(__cpu).multiplier
1164 #define prof_counter(__cpu)             cpu_data(__cpu).counter
1165
1166 void smp_percpu_timer_interrupt(struct pt_regs *regs)
1167 {
1168         unsigned long compare, tick, pstate;
1169         int cpu = smp_processor_id();
1170         int user = user_mode(regs);
1171
1172         /*
1173          * Check for level 14 softint.
1174          */
1175         {
1176                 unsigned long tick_mask = tick_ops->softint_mask;
1177
1178                 if (!(get_softint() & tick_mask)) {
1179                         extern void handler_irq(int, struct pt_regs *);
1180
1181                         handler_irq(14, regs);
1182                         return;
1183                 }
1184                 clear_softint(tick_mask);
1185         }
1186
1187         do {
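        /* The loop below reprograms the compare register and repeats if the
         * new value is already in the past (time_after_eq), so tick periods
         * missed while we were delayed are processed rather than dropped.
         */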
1188                 profile_tick(CPU_PROFILING, regs);
1189                 if (!--prof_counter(cpu)) {
1190                         irq_enter();
1191
1192                         if (cpu == boot_cpu_id) {
1193                                 kstat_this_cpu.irqs[0]++;
1194                                 timer_tick_interrupt(regs);
1195                         }
1196
1197                         update_process_times(user);
1198
1199                         irq_exit();
1200
1201                         prof_counter(cpu) = prof_multiplier(cpu);
1202                 }
1203
1204                 /* Guarantee that the following sequences execute
1205                  * uninterrupted.
1206                  */
1207                 __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
1208                                      "wrpr      %0, %1, %%pstate"
1209                                      : "=r" (pstate)
1210                                      : "i" (PSTATE_IE));
1211
1212                 compare = tick_ops->add_compare(current_tick_offset);
1213                 tick = tick_ops->get_tick();
1214
1215                 /* Restore PSTATE_IE. */
1216                 __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
1217                                      : /* no outputs */
1218                                      : "r" (pstate));
1219         } while (time_after_eq(tick, compare));
1220 }
1221
1222 static void __init smp_setup_percpu_timer(void)
1223 {
1224         int cpu = smp_processor_id();
1225         unsigned long pstate;
1226
1227         prof_counter(cpu) = prof_multiplier(cpu) = 1;
1228
1229         /* Guarantee that the following sequences execute
1230          * uninterrupted.
1231          */
1232         __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
1233                              "wrpr      %0, %1, %%pstate"
1234                              : "=r" (pstate)
1235                              : "i" (PSTATE_IE));
1236
1237         tick_ops->init_tick(current_tick_offset);
1238
1239         /* Restore PSTATE_IE. */
1240         __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
1241                              : /* no outputs */
1242                              : "r" (pstate));
1243 }
1244
1245 void __init smp_tick_init(void)
1246 {
1247         boot_cpu_id = hard_smp_processor_id();
1248         current_tick_offset = timer_tick_offset;
1249
1250         cpu_set(boot_cpu_id, cpu_online_map);
1251         prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
1252 }
1253
1254 /* /proc/profile writes can call this, don't __init it please. */
1255 static DEFINE_SPINLOCK(prof_setup_lock);
1256
1257 int setup_profiling_timer(unsigned int multiplier)
1258 {
1259         unsigned long flags;
1260         int i;
1261
1262         if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
1263                 return -EINVAL;
1264
1265         spin_lock_irqsave(&prof_setup_lock, flags);
1266         for (i = 0; i < NR_CPUS; i++)
1267                 prof_multiplier(i) = multiplier;
1268         current_tick_offset = (timer_tick_offset / multiplier);
1269         spin_unlock_irqrestore(&prof_setup_lock, flags);
1270
1271         return 0;
1272 }
1273
1274 /* Constrain the number of cpus to max_cpus.  */
1275 void __init smp_prepare_cpus(unsigned int max_cpus)
1276 {
1277         if (num_possible_cpus() > max_cpus) {
1278                 int instance, mid;
1279
1280                 instance = 0;
1281                 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1282                         if (mid != boot_cpu_id) {
1283                                 cpu_clear(mid, phys_cpu_present_map);
1284                                 if (num_possible_cpus() <= max_cpus)
1285                                         break;
1286                         }
1287                         instance++;
1288                 }
1289         }
1290
1291         smp_store_cpu_info(boot_cpu_id);
1292 }
1293
1294 /* Set this up early so that things like the scheduler can init
1295  * properly.  We use the same cpu mask for both the present and
1296  * possible cpu map.
1297  */
1298 void __init smp_setup_cpu_possible_map(void)
1299 {
1300         int instance, mid;
1301
1302         instance = 0;
1303         while (!cpu_find_by_instance(instance, NULL, &mid)) {
1304                 if (mid < NR_CPUS)
1305                         cpu_set(mid, phys_cpu_present_map);
1306                 instance++;
1307         }
1308 }
1309
1310 void __devinit smp_prepare_boot_cpu(void)
1311 {
1312         int cpu = hard_smp_processor_id();
1313
1314         if (cpu >= NR_CPUS) {
1315                 prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
1316                 prom_halt();
1317         }
1318
1319         current_thread_info()->cpu = cpu;
1320         __local_per_cpu_offset = __per_cpu_offset(cpu);
1321
1322         cpu_set(smp_processor_id(), cpu_online_map);
1323         cpu_set(smp_processor_id(), phys_cpu_present_map);
1324 }
1325
1326 int __devinit __cpu_up(unsigned int cpu)
1327 {
1328         int ret = smp_boot_one_cpu(cpu);
1329
1330         if (!ret) {
1331                 cpu_set(cpu, smp_commenced_mask);
1332                 while (!cpu_isset(cpu, cpu_online_map))
1333                         mb();
1334                 if (!cpu_isset(cpu, cpu_online_map)) {
1335                         ret = -ENODEV;
1336                 } else {
1337                         /* On SUN4V, writes to %tick and %stick are
1338                          * not allowed.
1339                          */
1340                         if (tlb_type != hypervisor)
1341                                 smp_synchronize_one_tick(cpu);
1342                 }
1343         }
1344         return ret;
1345 }
1346
1347 void __init smp_cpus_done(unsigned int max_cpus)
1348 {
1349         unsigned long bogosum = 0;
1350         int i;
1351
1352         for (i = 0; i < NR_CPUS; i++) {
1353                 if (cpu_online(i))
1354                         bogosum += cpu_data(i).udelay_val;
1355         }
1356         printk("Total of %ld processors activated "
1357                "(%lu.%02lu BogoMIPS).\n",
1358                (long) num_online_cpus(),
1359                bogosum/(500000/HZ),
1360                (bogosum/(5000/HZ))%100);
1361 }
1362
1363 void smp_send_reschedule(int cpu)
1364 {
1365         smp_receive_signal(cpu);
1366 }
1367
1368 /* This is a nop because we capture all other cpus
1369  * anyway when making the PROM active.
1370  */
1371 void smp_send_stop(void)
1372 {
1373 }
1374
1375 unsigned long __per_cpu_base __read_mostly;
1376 unsigned long __per_cpu_shift __read_mostly;
1377
1378 EXPORT_SYMBOL(__per_cpu_base);
1379 EXPORT_SYMBOL(__per_cpu_shift);
1380
1381 void __init setup_per_cpu_areas(void)
1382 {
1383         unsigned long goal, size, i;
1384         char *ptr;
1385
1386         /* Copy section for each CPU (we discard the original) */
1387         goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
1388 #ifdef CONFIG_MODULES
1389         if (goal < PERCPU_ENOUGH_ROOM)
1390                 goal = PERCPU_ENOUGH_ROOM;
1391 #endif
1392         __per_cpu_shift = 0;
1393         for (size = 1UL; size < goal; size <<= 1UL)
1394                 __per_cpu_shift++;
1395
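        /* At this point "size" is the per-cpu area size rounded up to a
         * power of two and __per_cpu_shift is its log2.  __per_cpu_offset()
         * (in asm/percpu.h) presumably computes
         * __per_cpu_base + (cpu << __per_cpu_shift), which is why each
         * cpu's copy below is placed "size" bytes apart.
         */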
1396         ptr = alloc_bootmem(size * NR_CPUS);
1397
1398         __per_cpu_base = ptr - __per_cpu_start;
1399
1400         for (i = 0; i < NR_CPUS; i++, ptr += size)
1401                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1402 }