s390/spinlock: fix system hang with spin_retry <= 0
[linux-2.6-microblaze.git] / arch / s390 / lib / spinlock.c
1 /*
2  *    Out of line spinlock code.
3  *
4  *    Copyright IBM Corp. 2004, 2006
5  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6  */
7
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/spinlock.h>
11 #include <linux/init.h>
12 #include <linux/smp.h>
13 #include <asm/io.h>
14
15 int spin_retry = 1000;
16
17 /**
18  * spin_retry= parameter
19  */
20 static int __init spin_retry_setup(char *str)
21 {
22         spin_retry = simple_strtoul(str, &str, 0);
23         return 1;
24 }
25 __setup("spin_retry=", spin_retry_setup);
26
/*
 * Out-of-line slow path for arch_spin_lock(): entered after the inline
 * fast-path compare-and-swap failed, i.e. the lock is contended.
 *
 * Strategy: if the lock is free, or the owner's virtual cpu appears to
 * be running (smp_vcpu_scheduled), busy-wait with compare-and-swap
 * attempts for up to spin_retry rounds.  Otherwise yield this cpu in
 * favour of the owner's cpu and retry.  The lock word holds an encoding
 * of the owning cpu (it is passed complemented to smp_yield_cpu, so the
 * stored value is presumably ~cpu — the same encoding SPINLOCK_LOCKVAL
 * uses for us).
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;	/* our owner value for the lock word */
	unsigned int owner;
	int count;

	while (1) {
		owner = lp->lock;
		/* Busy-waiting only pays off while the holder is actually running. */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			count = spin_retry;
			/*
			 * do/while, not while: guarantees at least one
			 * compare-and-swap attempt per round even when
			 * spin_retry <= 0.  Otherwise the LPAR "continue"
			 * below would loop forever without ever trying to
			 * take the lock (system hang, see commit subject).
			 */
			do {
				if (arch_spin_is_locked(lp))
					continue;	/* still held: just re-test */
				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
					return;		/* lock acquired */
			} while (count-- > 0);
			/*
			 * On LPAR the yield below is pointless (virtual cpus
			 * are not preempted the way they are under VM/KVM),
			 * so keep spinning.
			 */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Owner looks preempted: donate our timeslice to its cpu. */
		owner = lp->lock;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
54
/*
 * Slow path for arch_spin_lock_flags(): same strategy as
 * arch_spin_lock_wait(), but interrupt-aware.  The caller enters with
 * interrupts disabled and @flags holding the pre-disable state; while
 * spinning we restore @flags (typically re-enabling interrupts) so
 * pending interrupts are not delayed, and disable interrupts again
 * immediately before each compare-and-swap so that on success the lock
 * is held with interrupts off, as the caller expects.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;	/* our owner value for the lock word */
	unsigned int owner;
	int count;

	local_irq_restore(flags);	/* allow interrupts while we wait */
	while (1) {
		owner = lp->lock;
		/* Busy-waiting only pays off while the holder is actually running. */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			count = spin_retry;
			/*
			 * do/while guarantees at least one compare-and-swap
			 * attempt per round even when spin_retry <= 0
			 * (prevents a CAS-free endless loop on LPAR).
			 */
			do {
				if (arch_spin_is_locked(lp))
					continue;	/* still held: just re-test */
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
					return;		/* acquired, irqs stay off */
				local_irq_restore(flags);	/* failed: re-enable */
			} while (count-- > 0);
			/* On LPAR yielding is pointless: keep spinning. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Owner looks preempted: donate our timeslice to its cpu. */
		owner = lp->lock;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
			return;		/* acquired, irqs stay off */
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
87
88 void arch_spin_relax(arch_spinlock_t *lp)
89 {
90         unsigned int cpu = lp->lock;
91         if (cpu != 0) {
92                 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
93                     !smp_vcpu_scheduled(~cpu))
94                         smp_yield_cpu(~cpu);
95         }
96 }
97 EXPORT_SYMBOL(arch_spin_relax);
98
99 int arch_spin_trylock_retry(arch_spinlock_t *lp)
100 {
101         int count;
102
103         for (count = spin_retry; count > 0; count--) {
104                 if (arch_spin_is_locked(lp))
105                         continue;
106                 if (arch_spin_trylock_once(lp))
107                         return 1;
108         }
109         return 0;
110 }
111 EXPORT_SYMBOL(arch_spin_trylock_retry);
112
113 void _raw_read_lock_wait(arch_rwlock_t *rw)
114 {
115         unsigned int old;
116         int count = spin_retry;
117
118         while (1) {
119                 if (count-- <= 0) {
120                         smp_yield();
121                         count = spin_retry;
122                 }
123                 if (!arch_read_can_lock(rw))
124                         continue;
125                 old = rw->lock & 0x7fffffffU;
126                 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
127                         return;
128         }
129 }
130 EXPORT_SYMBOL(_raw_read_lock_wait);
131
132 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
133 {
134         unsigned int old;
135         int count = spin_retry;
136
137         local_irq_restore(flags);
138         while (1) {
139                 if (count-- <= 0) {
140                         smp_yield();
141                         count = spin_retry;
142                 }
143                 if (!arch_read_can_lock(rw))
144                         continue;
145                 old = rw->lock & 0x7fffffffU;
146                 local_irq_disable();
147                 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
148                         return;
149         }
150 }
151 EXPORT_SYMBOL(_raw_read_lock_wait_flags);
152
153 int _raw_read_trylock_retry(arch_rwlock_t *rw)
154 {
155         unsigned int old;
156         int count = spin_retry;
157
158         while (count-- > 0) {
159                 if (!arch_read_can_lock(rw))
160                         continue;
161                 old = rw->lock & 0x7fffffffU;
162                 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
163                         return 1;
164         }
165         return 0;
166 }
167 EXPORT_SYMBOL(_raw_read_trylock_retry);
168
169 void _raw_write_lock_wait(arch_rwlock_t *rw)
170 {
171         int count = spin_retry;
172
173         while (1) {
174                 if (count-- <= 0) {
175                         smp_yield();
176                         count = spin_retry;
177                 }
178                 if (!arch_write_can_lock(rw))
179                         continue;
180                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
181                         return;
182         }
183 }
184 EXPORT_SYMBOL(_raw_write_lock_wait);
185
186 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
187 {
188         int count = spin_retry;
189
190         local_irq_restore(flags);
191         while (1) {
192                 if (count-- <= 0) {
193                         smp_yield();
194                         count = spin_retry;
195                 }
196                 if (!arch_write_can_lock(rw))
197                         continue;
198                 local_irq_disable();
199                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
200                         return;
201         }
202 }
203 EXPORT_SYMBOL(_raw_write_lock_wait_flags);
204
205 int _raw_write_trylock_retry(arch_rwlock_t *rw)
206 {
207         int count = spin_retry;
208
209         while (count-- > 0) {
210                 if (!arch_write_can_lock(rw))
211                         continue;
212                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
213                         return 1;
214         }
215         return 0;
216 }
217 EXPORT_SYMBOL(_raw_write_trylock_retry);