/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/sections.h>
extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
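/* Rough lifecycle of these masks, as used later in this file: a cpu shows
 * up in phys_cpu_present_map at probe time, is added to cpu_callout_map
 * when the master starts it in smp_boot_one_cpu(), to smp_commenced_mask
 * when __cpu_up() releases it, and finally marks itself in cpu_online_map
 * from smp_callin().
 */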
void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "CPU%d:\t\tonline\n", i);
	}
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data(i).udelay_val / (500000/HZ),
				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
				   i, cpu_data(i).clock_tick);
}
void __init smp_store_cpu_info(int id)
{
	int cpu_node, def;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
						      def);

	def = 32;
	cpu_data(id).dcache_line_size =
		prom_getintdefault(cpu_node, "dcache-line-size", def);

	def = 16 * 1024;
	cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
						      def);

	def = 32;
	cpu_data(id).icache_line_size =
		prom_getintdefault(cpu_node, "icache-line-size", def);

	def = ((tlb_type == hypervisor) ?
	       (3 * 1024 * 1024) :
	       (4 * 1024 * 1024));
	cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
						      def);

	def = 64;
	cpu_data(id).ecache_line_size =
		prom_getintdefault(cpu_node, "ecache-line-size", def);

	printk("CPU[%d]: Caches "
	       "D[sz(%d):line_sz(%d)] "
	       "I[sz(%d):line_sz(%d)] "
	       "E[sz(%d):line_sz(%d)]\n",
	       id,
	       cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
	       cpu_data(id).icache_size, cpu_data(id).icache_line_size,
	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	smp_setup_percpu_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
static unsigned long current_tick_offset __read_mostly;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
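/* go[] is the handshake area for the tick synchronization below: the
 * master stores its current %tick into go[SLAVE] (see
 * smp_synchronize_one_tick()) while the slave spins on that word in
 * get_delta().  MASTER and SLAVE index slots a cache line apart,
 * presumably to keep the two sides from false sharing.
 */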
#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
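/* get_delta() returns an estimate of how far this cpu's tick midpoint is
 * from the master's timestamp; *rt is the measured round-trip time and
 * bounds the error of that estimate.  The client below applies the negated
 * delta (smoothed over several rounds) to its own tick register.
 */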
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	if (tlb_type == hypervisor) {
		/* Alloc the mondo queues, cpu will load them.  */
		sun4v_init_mondo_queues(0, cpu, 1, 0);

		prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		int cpu_node;

		cpu_find_by_mid(cpu, &cpu_node);
		prom_startcpu(cpu_node, entry, cookie);
	}

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
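/* Spitfire can only dispatch one mondo at a time, hence the simple loop
 * over the mask above; the Cheetah variant below takes advantage of the
 * newer pipelined dispatch described in the following comment.
 */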
/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}

	/* Now, poll for completion. */
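	/* Each dispatch slot owns a pair of bits in the dispatch status
	 * register: the even bit of the pair is "busy", the odd bit is
	 * "NACK".  The 0x5555555555555555 mask below therefore selects all
	 * busy bits, while check_mask further down isolates a single cpu's
	 * NACK bit (by ITID on JBUS parts, by dispatch slot otherwise).
	 */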
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}
/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, i;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	do {
		int forward_progress;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, clear out all the cpus in the mask that were
		 * successfully sent to.  The hypervisor indicates this
		 * by setting the cpu list entry of such cpus to 0xffff.
		 */
		forward_progress = 0;
		for (i = 0; i < cnt; i++) {
			if (cpu_list[i] == 0xffff) {
				cpu_clear(i, mask);
				forward_progress = 1;
			}
		}

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_clear(cpu, mask);
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Rebuild the cpu_list[] array and try again.  */
		cnt = 0;
		for_each_cpu_mask(i, mask)
			cpu_list[cnt++] = i;

		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}
extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
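/* For all of the xcall entry points used in this file, data0 as built by
 * smp_cross_call_masked() carries the MMU context number in its upper 32
 * bits and the low 32 bits of the handler's kernel address in its lower
 * half; data1 and data2 are handler-specific arguments.
 */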
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus = cpus_weight(mask) - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	call_data = &data;

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
	       cpus, atomic_read(&data.finished));
	return 0;
}
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}
static void tsb_sync(void *info)
{
	struct mm_struct *mm = info;

	if (current->active_mm == mm)
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
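/* Roughly: when D-cache aliasing is possible we flush the page out of the
 * D-cache (and, on spitfire, the I-cache too if the page has a mapping,
 * i.e. page_mapping() != NULL); when aliasing is impossible only the
 * spitfire I-cache flush is needed.
 */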
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}
static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(mm == &init_mm))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void smp_new_mmu_context_version(void)
{
	__smp_receive_signal_mask(cpu_online_map);
}
void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}
/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}
#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter
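/* prof_counter counts down once per tick interrupt; when it reaches zero
 * the cpu does its timekeeping and process accounting work and reloads it
 * from prof_multiplier.  setup_profiling_timer() below raises the
 * multiplier (and shrinks current_tick_offset) to sample more often.
 */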
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}
static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}

void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}
/* Constrain the number of cpus to max_cpus.  */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	if (num_possible_cpus() > max_cpus) {
		int instance, mid;

		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	int cpu = hard_smp_processor_id();

	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = cpu;
	__local_per_cpu_offset = __per_cpu_offset(cpu);

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			bogosum += cpu_data(i).udelay_val;
	}
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}
unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

void __init setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
	if (goal < PERCPU_ENOUGH_ROOM)
		goal = PERCPU_ENOUGH_ROOM;
#endif
	__per_cpu_shift = 0;
	for (size = 1UL; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
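/* The base/shift pair computed above is what __per_cpu_offset() (used by
 * smp_callin() and smp_prepare_boot_cpu()) consumes: a cpu's per-cpu area
 * is expected at __per_cpu_base + (cpu << __per_cpu_shift), which is why
 * the per-cpu copy size is rounded up to a power of two.
 */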