#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

/*
 *	switch_to(prev, next, last) switches the CPU from task 'prev'
 *	to task 'next', returning the previously-running task in 'last'.
 */

#define switch_to(prev, next, last) do {				\
	struct task_struct *__last;					\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		!  with return to new PC\n\t" \
			      ".balign	4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

static __inline__ unsigned long tas(volatile int *m)
{ /* #define tas(ptr) (xchg((ptr),1)) */
	unsigned long retval;

	__asm__ __volatile__ ("tas.b	@%1\n\t"	/* test byte, set its MSB */
			      "movt	%0"		/* retval = T bit */
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#ifdef CONFIG_CPU_SH4A
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define read_barrier_depends()	do { } while(0)
#else
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define read_barrier_depends()	do { } while(0)
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
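
/*
 * Usage sketch (illustrative; 'data', 'ready' and use() are
 * hypothetical): a lockless producer/consumer pair orders its stores
 * and loads with the smp_* barriers, which collapse to plain compiler
 * barriers on !CONFIG_SMP:
 *
 *	data = value;		// producer
 *	smp_wmb();		// publish data before the flag
 *	ready = 1;
 *
 *	while (!ready)		// consumer
 *		;
 *	smp_rmb();		// read the flag before reading data
 *	use(data);
 */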

/* Interrupt Control */
static __inline__ void local_irq_enable(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "stc	r6_bank, %1\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy0), "=r" (__dummy1)
			     : "1" (~0x000000f0)
			     : "memory");
}

static __inline__ void local_irq_disable(void)
{
	unsigned long __dummy;
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr"
			     : "=&z" (__dummy)
			     : /* no inputs */
			     : "memory");
}

#define local_save_flags(x) \
	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags != 0);			\
})
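
/*
 * Example (illustrative): sanity-check the calling context; any nonzero
 * IMASK field in SR counts as "disabled":
 *
 *	if (irqs_disabled())
 *		printk(KERN_WARNING "%s: IRQs unexpectedly off\n",
 *		       __FUNCTION__);
 */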

static __inline__ unsigned long local_irq_save(void)
{
	unsigned long flags, __dummy;

	__asm__ __volatile__("stc	sr, %1\n\t"
			     "mov	%1, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr\n\t"
			     "mov	%1, %0\n\t"
			     "and	#0xf0, %0"
			     : "=&z" (flags), "=&r" (__dummy)
			     :/**/
			     : "memory" );
	return flags;
}

#ifdef DEBUG_CLI_STI
static __inline__ void local_irq_restore(unsigned long x)
{
	if ((x & 0x000000f0) != 0x000000f0)
		local_irq_enable();
	else {
		unsigned long flags;
		local_save_flags(flags);

		if (flags == 0) {
			extern void dump_stack(void);
			printk(KERN_ERR "BUG!\n");
			dump_stack();
			local_irq_disable();
		}
	}
}
#else
#define local_irq_restore(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
} while (0)
#endif

#define really_restore_flags(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
	else						\
		local_irq_disable();			\
} while (0)

/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from the P2 area.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1()					\
do {							\
	unsigned long __dummy;				\
	__asm__ __volatile__(				\
		"nop;nop;nop;nop;nop;nop;nop\n\t"	\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)

/* For spinlocks etc */
#define local_irq_save(x)	x = local_irq_save()
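
/*
 * Typical pattern (illustrative): bracket a short critical section so
 * that interrupt state nests correctly:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// mask IRQs, remember old IMASK
 *	...touch data shared with an interrupt handler...
 *	local_irq_restore(flags);	// re-enable only if previously enabled
 */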

static __inline__ unsigned long xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long xchg_u8(volatile unsigned char *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	case 1:
		return xchg_u8(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
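
/*
 * Example (illustrative): xchg() dispatches through __xchg() by operand
 * size, so only 1- and 4-byte objects can be exchanged on sh:
 *
 *	static unsigned int pending;		// hypothetical flag word
 *	unsigned int old = xchg(&pending, 0);	// atomically fetch-and-clear
 */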

static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
	unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier */
	return retval;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
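
/*
 * Illustrative retry loop (not from this header): lockless increment
 * of a hypothetical 'counter' using compare-and-swap; the loop retries
 * whenever another path updated the word between the read and the swap:
 *
 *	int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */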

/*
 * XXX
 * Disable hlt during certain critical I/O operations.
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif /* __ASM_SH_SYSTEM_H */