// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"

/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
        int error = -EINTR;

        down_write(&ls->ls_recv_active);

        spin_lock(&ls->ls_recover_lock);
        if (ls->ls_recover_seq == seq) {
                set_bit(LSFL_RUNNING, &ls->ls_flags);
                /* unblocks processes waiting to enter the dlm */
                up_write(&ls->ls_in_recovery);
                clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
                error = 0;
        }
        spin_unlock(&ls->ls_recover_lock);

        up_write(&ls->ls_recv_active);
        return error;
}

static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
        unsigned long start;
        int error, neg = 0;

        log_rinfo(ls, "dlm_recover %llu", (unsigned long long)rv->seq);

        mutex_lock(&ls->ls_recoverd_active);

        dlm_callback_suspend(ls);

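        /*
         * Free the unused rsbs sitting on the toss lists; recovery
         * works only with the actively used (keep) rsbs.
         */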
        dlm_clear_toss(ls);

        /*
         * This list of root rsb's will be the basis of most of the recovery
         * routines.
         */

        dlm_create_root_list(ls);

        /*
         * Add or remove nodes from the lockspace's ls_nodes list.
         */

        error = dlm_recover_members(ls, rv, &neg);
        if (error) {
                log_rinfo(ls, "dlm_recover_members error %d", error);
                goto fail;
        }

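        /*
         * Recompute which node holds the directory entry for each root
         * rsb, based on the updated member list.
         */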
        dlm_recover_dir_nodeid(ls);

        ls->ls_recover_dir_sent_res = 0;
        ls->ls_recover_dir_sent_msg = 0;
        ls->ls_recover_locks_in = 0;

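        /*
         * Each dlm_set_recover_status()/dlm_recover_*_wait() pair below
         * acts as a cluster-wide barrier: every node must report reaching
         * the given status before recovery continues to the next stage.
         */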
        dlm_set_recover_status(ls, DLM_RS_NODES);

        error = dlm_recover_members_wait(ls);
        if (error) {
                log_rinfo(ls, "dlm_recover_members_wait error %d", error);
                goto fail;
        }

        start = jiffies;

        /*
         * Rebuild our own share of the directory by collecting from all other
         * nodes their master rsb names that hash to us.
         */

        error = dlm_recover_directory(ls);
        if (error) {
                log_rinfo(ls, "dlm_recover_directory error %d", error);
                goto fail;
        }

        dlm_set_recover_status(ls, DLM_RS_DIR);

        error = dlm_recover_directory_wait(ls);
        if (error) {
                log_rinfo(ls, "dlm_recover_directory_wait error %d", error);
                goto fail;
        }

        log_rinfo(ls, "dlm_recover_directory %u out %u messages",
                  ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg);

        /*
         * We may have outstanding operations that are waiting for a reply from
         * a failed node.  Mark these to be resent after recovery.  Unlock and
         * cancel ops can just be completed.
         */

        dlm_recover_waiters_pre(ls);

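        /*
         * Bail out if a newer stop request arrived while we were working;
         * a subsequent recovery will supersede this one.
         */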
        if (dlm_recovery_stopped(ls)) {
                error = -EINTR;
                goto fail;
        }

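        /*
         * Locks must be remastered if any nodes left the lockspace (neg),
         * and always in lockspaces that run without a resource directory.
         */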
        if (neg || dlm_no_directory(ls)) {
                /*
                 * Clear lkb's for departed nodes.
                 */

                dlm_recover_purge(ls);

                /*
                 * Get new master nodeid's for rsb's that were mastered on
                 * departed nodes.
                 */

                error = dlm_recover_masters(ls);
                if (error) {
                        log_rinfo(ls, "dlm_recover_masters error %d", error);
                        goto fail;
                }

                /*
                 * Send our locks on remastered rsb's to the new masters.
                 */

                error = dlm_recover_locks(ls);
                if (error) {
                        log_rinfo(ls, "dlm_recover_locks error %d", error);
                        goto fail;
                }

                dlm_set_recover_status(ls, DLM_RS_LOCKS);

                error = dlm_recover_locks_wait(ls);
                if (error) {
                        log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
                        goto fail;
                }

                log_rinfo(ls, "dlm_recover_locks %u in",
                          ls->ls_recover_locks_in);

                /*
                 * Finalize state in master rsb's now that all locks can be
                 * checked.  This includes conversion resolution and lvb
                 * settings.
                 */

                dlm_recover_rsbs(ls);
        } else {
                /*
                 * Other lockspace members may be going through the "neg" steps
                 * while also adding us to the lockspace, in which case they'll
                 * be doing the recover_locks (RS_LOCKS) barrier.
                 */
                dlm_set_recover_status(ls, DLM_RS_LOCKS);

                error = dlm_recover_locks_wait(ls);
                if (error) {
                        log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
                        goto fail;
                }
        }

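        /* The snapshot of root rsbs is no longer needed. */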
        dlm_release_root_list(ls);

        /*
         * Purge directory-related requests that are saved in requestqueue.
         * All dir requests from before recovery are invalid now due to the dir
         * rebuild and will be resent by the requesting nodes.
         */

        dlm_purge_requestqueue(ls);

        dlm_set_recover_status(ls, DLM_RS_DONE);

        error = dlm_recover_done_wait(ls);
        if (error) {
                log_rinfo(ls, "dlm_recover_done_wait error %d", error);
                goto fail;
        }

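        /*
         * Free the member structs for the nodes that have left; their
         * locks have already been purged.
         */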
        dlm_clear_members_gone(ls);

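        /*
         * Add the time spent in recovery to the timestamps of lkbs on
         * the timeout list, so waiting locks are not timed out for
         * delays that recovery itself caused.
         */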
        dlm_adjust_timeouts(ls);

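        /*
         * Resume delivery of the ast/bast callbacks that were queued
         * while callbacks were suspended.
         */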
        dlm_callback_resume(ls);

        error = enable_locking(ls, rv->seq);
        if (error) {
                log_rinfo(ls, "enable_locking error %d", error);
                goto fail;
        }

        error = dlm_process_requestqueue(ls);
        if (error) {
                log_rinfo(ls, "dlm_process_requestqueue error %d", error);
                goto fail;
        }

        error = dlm_recover_waiters_post(ls);
        if (error) {
                log_rinfo(ls, "dlm_recover_waiters_post error %d", error);
                goto fail;
        }

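        /*
         * Grant any locks that have become grantable as a result of
         * recovery.
         */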
        dlm_recover_grant(ls);

        log_rinfo(ls, "dlm_recover %llu generation %u done: %u ms",
                  (unsigned long long)rv->seq, ls->ls_generation,
                  jiffies_to_msecs(jiffies - start));
        mutex_unlock(&ls->ls_recoverd_active);

        dlm_lsop_recover_done(ls);
        return 0;

 fail:
        dlm_release_root_list(ls);
        log_rinfo(ls, "dlm_recover %llu error %d",
                  (unsigned long long)rv->seq, error);
        mutex_unlock(&ls->ls_recoverd_active);
        return error;
}

/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the
   LSFL_RECOVER_STOP flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
        struct dlm_recover *rv = NULL;

        spin_lock(&ls->ls_recover_lock);
        rv = ls->ls_recover_args;
        ls->ls_recover_args = NULL;
        if (rv && ls->ls_recover_seq == rv->seq)
                clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
        spin_unlock(&ls->ls_recover_lock);

        if (rv) {
                ls_recover(ls, rv);
                kfree(rv->nodes);
                kfree(rv);
        }
}

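/* The per-lockspace recovery thread.  It blocks locking from the moment the
   lockspace is created (and again whenever a member goes down), and runs
   ls_recover() whenever recovery work is queued for it. */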
static int dlm_recoverd(void *arg)
{
        struct dlm_ls *ls;

        ls = dlm_find_lockspace_local(arg);
        if (!ls) {
                log_print("dlm_recoverd: no lockspace %p", arg);
                return -1;
        }

        down_write(&ls->ls_in_recovery);
        set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
        wake_up(&ls->ls_recover_lock_wait);

        while (1) {
                /*
                 * kthread_should_stop() is checked after
                 * set_current_state() so that a kthread_stop() issued
                 * just before the state change is not missed.
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop()) {
                        set_current_state(TASK_RUNNING);
                        break;
                }
                if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) &&
                    !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
                        if (kthread_should_stop())
                                break;
                        schedule();
                }
                set_current_state(TASK_RUNNING);

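                /*
                 * Locking has been stopped: reacquire ls_in_recovery to
                 * block lock activity, and note that recoverd now holds
                 * it (LSFL_RECOVER_LOCK).
                 */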
                if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
                        down_write(&ls->ls_in_recovery);
                        set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
                        wake_up(&ls->ls_recover_lock_wait);
                }

                if (test_and_clear_bit(LSFL_RECOVER_WORK, &ls->ls_flags))
                        do_ls_recovery(ls);
        }

        if (test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags))
                up_write(&ls->ls_in_recovery);

        dlm_put_lockspace(ls);
        return 0;
}

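/* Create and start the recovery thread for a lockspace. */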
int dlm_recoverd_start(struct dlm_ls *ls)
{
        struct task_struct *p;
        int error = 0;

        p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
        if (IS_ERR(p))
                error = PTR_ERR(p);
        else
                ls->ls_recoverd_task = p;
        return error;
}

void dlm_recoverd_stop(struct dlm_ls *ls)
{
        kthread_stop(ls->ls_recoverd_task);
}

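/* Keep recoverd from running ls_recover() while the caller needs it quiet:
   waking ls_wait_general prods any in-progress recovery wait to re-check its
   condition so ls_recoverd_active can be taken. */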
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
        wake_up(&ls->ls_wait_general);
        mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
        mutex_unlock(&ls->ls_recoverd_active);
}