fs/ext4/mmp.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/utsname.h>
#include <linux/kthread.h>

#include "ext4.h"

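/*
 * Multiple mount protection (MMP).  While an MMP-protected filesystem is
 * mounted, the kmmpd kernel thread periodically bumps a sequence number
 * and timestamp in the designated MMP block.  A node that wants to mount
 * the filesystem first watches this block: if it keeps changing, some
 * other node still has the filesystem mounted and the mount is refused.
 */
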
/* Checksumming functions */
static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int offset = offsetof(struct mmp_struct, mmp_checksum);
        __u32 csum;

        csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);

        return cpu_to_le32(csum);
}

static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
{
        if (!ext4_has_metadata_csum(sb))
                return 1;

        return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
}

static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
{
        if (!ext4_has_metadata_csum(sb))
                return;

        mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
}

/*
 * Write the MMP block using REQ_SYNC to try to get the block on-disk
 * faster.
 */
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
{
        struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);

        /*
         * We protect against freezing so that we don't create dirty buffers
         * on a frozen filesystem.
         */
        sb_start_write(sb);
        ext4_mmp_csum_set(sb, mmp);
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
        sb_end_write(sb);
        if (unlikely(!buffer_uptodate(bh)))
                return -EIO;

        return 0;
}

/*
 * Read the MMP block. It _must_ be read from disk and hence we clear the
 * uptodate flag on the buffer.
 */
static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
                          ext4_fsblk_t mmp_block)
{
        struct mmp_struct *mmp;
        int ret;

        if (*bh)
                clear_buffer_uptodate(*bh);

        /* This would be sb_bread(sb, mmp_block), except we need to be sure
         * that the MD RAID device cache has been bypassed, and that the read
         * is not blocked in the elevator. */
        if (!*bh) {
                *bh = sb_getblk(sb, mmp_block);
                if (!*bh) {
                        ret = -ENOMEM;
                        goto warn_exit;
                }
        }

        lock_buffer(*bh);
        ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL);
        if (ret)
                goto warn_exit;

        mmp = (struct mmp_struct *)((*bh)->b_data);
        if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
                ret = -EFSCORRUPTED;
                goto warn_exit;
        }
        if (!ext4_mmp_csum_verify(sb, mmp)) {
                ret = -EFSBADCRC;
                goto warn_exit;
        }
        return 0;
warn_exit:
        brelse(*bh);
        *bh = NULL;
        ext4_warning(sb, "Error %d while reading MMP block %llu",
                     ret, mmp_block);
        return ret;
}

/*
 * Dump as much information as possible to help the admin.
 */
void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
                    const char *function, unsigned int line, const char *msg)
{
        __ext4_warning(sb, function, line, "%s", msg);
        __ext4_warning(sb, function, line,
                       "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
                       (unsigned long long)le64_to_cpu(mmp->mmp_time),
                       (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
                       (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
}

/*
 * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
 */
static int kmmpd(void *data)
{
        struct super_block *sb = (struct super_block *) data;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        struct buffer_head *bh = EXT4_SB(sb)->s_mmp_bh;
        struct mmp_struct *mmp;
        ext4_fsblk_t mmp_block;
        u32 seq = 0;
        unsigned long failed_writes = 0;
        int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
        unsigned mmp_check_interval;
        unsigned long last_update_time;
        unsigned long diff;
        int retval = 0;

        mmp_block = le64_to_cpu(es->s_mmp_block);
        mmp = (struct mmp_struct *)(bh->b_data);
        mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
        /*
         * Start with the higher mmp_check_interval and reduce it if
         * the MMP block is being updated on time.
         */
        mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
                                 EXT4_MMP_MIN_CHECK_INTERVAL);
        mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
        BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
        bdevname(bh->b_bdev, mmp->mmp_bdevname);

        memcpy(mmp->mmp_nodename, init_utsname()->nodename,
               sizeof(mmp->mmp_nodename));

        while (!kthread_should_stop() && !sb_rdonly(sb)) {
                if (!ext4_has_feature_mmp(sb)) {
                        ext4_warning(sb, "kmmpd being stopped since MMP feature"
                                     " has been disabled.");
                        goto wait_to_exit;
                }
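                /*
                 * Bump the sequence number (wrapping before the reserved
                 * values above EXT4_MMP_SEQ_MAX) and refresh the timestamp,
                 * then write the block out so that other nodes can see the
                 * filesystem is still in use.
                 */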
                if (++seq > EXT4_MMP_SEQ_MAX)
                        seq = 1;

                mmp->mmp_seq = cpu_to_le32(seq);
                mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
                last_update_time = jiffies;

                retval = write_mmp_block(sb, bh);
                /*
                 * Don't spew too many error messages. Print one every
                 * (s_mmp_update_interval * 60) seconds.
                 */
                if (retval) {
                        if ((failed_writes % 60) == 0) {
                                ext4_error_err(sb, -retval,
                                               "Error writing to MMP block");
                        }
                        failed_writes++;
                }

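                /*
                 * Sleep until the next update is due, accounting for
                 * however long the write above took.
                 */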
                diff = jiffies - last_update_time;
                if (diff < mmp_update_interval * HZ)
                        schedule_timeout_interruptible(mmp_update_interval *
                                                       HZ - diff);

                /*
                 * If more than mmp_check_interval seconds have passed since
                 * the last write (for example because the write itself was
                 * slow), another node may have decided the filesystem is no
                 * longer in use, so re-read the MMP block and check that it
                 * is still as we left it.
                 */
                diff = jiffies - last_update_time;
                if (diff > mmp_check_interval * HZ) {
                        struct buffer_head *bh_check = NULL;
                        struct mmp_struct *mmp_check;

                        retval = read_mmp_block(sb, &bh_check, mmp_block);
                        if (retval) {
                                ext4_error_err(sb, -retval,
                                               "error reading MMP data: %d",
                                               retval);
                                goto wait_to_exit;
                        }

                        mmp_check = (struct mmp_struct *)(bh_check->b_data);
                        if (mmp->mmp_seq != mmp_check->mmp_seq ||
                            memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
                                   sizeof(mmp->mmp_nodename))) {
                                dump_mmp_msg(sb, mmp_check,
                                             "Error while updating MMP info. "
                                             "The filesystem seems to have been"
                                             " multiply mounted.");
                                ext4_error_err(sb, EBUSY, "abort");
                                put_bh(bh_check);
                                retval = -EBUSY;
                                goto wait_to_exit;
                        }
                        put_bh(bh_check);
                }

                /*
                 * Adjust the mmp_check_interval depending on how much time
                 * it took for the MMP block to be written.
                 */
                mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
                                             EXT4_MMP_MAX_CHECK_INTERVAL),
                                         EXT4_MMP_MIN_CHECK_INTERVAL);
                mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
        }

        /*
         * Unmount seems to be clean.
         */
        mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
        mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());

        retval = write_mmp_block(sb, bh);

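        /*
         * Do not exit on our own: park here until kthread_stop() is called,
         * so that ext4_stop_mmpd() remains the only place that stops kmmpd
         * and releases the MMP buffer head.
         */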
wait_to_exit:
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!kthread_should_stop())
                        schedule();
        }
        set_current_state(TASK_RUNNING);
        return retval;
}

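/*
 * Stop the kmmpd thread for this filesystem, if one is running, and release
 * the buffer head holding the MMP block.
 */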
void ext4_stop_mmpd(struct ext4_sb_info *sbi)
{
        if (sbi->s_mmp_tsk) {
                kthread_stop(sbi->s_mmp_tsk);
                brelse(sbi->s_mmp_bh);
                sbi->s_mmp_tsk = NULL;
        }
}

/*
 * Get a random new sequence number but make sure it is not greater than
 * EXT4_MMP_SEQ_MAX; the values above it are reserved for the special
 * CLEAN and FSCK markers.
 */
static unsigned int mmp_new_seq(void)
{
        u32 new_seq;

        do {
                new_seq = prandom_u32();
        } while (new_seq > EXT4_MMP_SEQ_MAX);

        return new_seq;
}

/*
 * Protect the filesystem from being mounted more than once.
 */
int ext4_multi_mount_protect(struct super_block *sb,
                             ext4_fsblk_t mmp_block)
{
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        struct buffer_head *bh = NULL;
        struct mmp_struct *mmp = NULL;
        u32 seq;
        unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
        unsigned int wait_time = 0;
        int retval;

        if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
            mmp_block >= ext4_blocks_count(es)) {
                ext4_warning(sb, "Invalid MMP block in superblock");
                goto failed;
        }

        retval = read_mmp_block(sb, &bh, mmp_block);
        if (retval)
                goto failed;

        mmp = (struct mmp_struct *)(bh->b_data);

        if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
                mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;

        /*
         * If check_interval in MMP block is larger, use that instead of
         * update_interval from the superblock.
         */
        if (le16_to_cpu(mmp->mmp_check_interval) > mmp_check_interval)
                mmp_check_interval = le16_to_cpu(mmp->mmp_check_interval);

        seq = le32_to_cpu(mmp->mmp_seq);
        if (seq == EXT4_MMP_SEQ_CLEAN)
                goto skip;

        if (seq == EXT4_MMP_SEQ_FSCK) {
                dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
                goto failed;
        }

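        /*
         * If a kmmpd on another node is still alive, it will have updated
         * mmp_seq within one check interval, so waiting a bit longer than
         * that (but no more than an extra minute) is enough to notice it
         * when the block is re-read below.
         */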
        wait_time = min(mmp_check_interval * 2 + 1,
                        mmp_check_interval + 60);

        /* Print MMP interval if more than 20 secs. */
        if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
                ext4_warning(sb, "MMP interval %u higher than expected, please"
                             " wait.\n", wait_time * 2);

        if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
                ext4_warning(sb, "MMP startup interrupted, failing mount\n");
                goto failed;
        }

        retval = read_mmp_block(sb, &bh, mmp_block);
        if (retval)
                goto failed;
        mmp = (struct mmp_struct *)(bh->b_data);
        if (seq != le32_to_cpu(mmp->mmp_seq)) {
                dump_mmp_msg(sb, mmp,
                             "Device is already active on another node.");
                goto failed;
        }

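        /*
         * Reached either because the block was marked clean or because the
         * sequence number did not change while we waited, i.e. no live
         * kmmpd was detected on another node.
         */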
skip:
        /*
         * write a new random sequence number.
         */
        seq = mmp_new_seq();
        mmp->mmp_seq = cpu_to_le32(seq);

        retval = write_mmp_block(sb, bh);
        if (retval)
                goto failed;

        /*
         * Wait for one more MMP interval and re-check mmp_seq: if another
         * mounting node raced with us and wrote its own sequence number,
         * the value on disk will no longer match ours.
         */
        if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
                ext4_warning(sb, "MMP startup interrupted, failing mount");
                goto failed;
        }

        retval = read_mmp_block(sb, &bh, mmp_block);
        if (retval)
                goto failed;
        mmp = (struct mmp_struct *)(bh->b_data);
        if (seq != le32_to_cpu(mmp->mmp_seq)) {
                dump_mmp_msg(sb, mmp,
                             "Device is already active on another node.");
                goto failed;
        }

        EXT4_SB(sb)->s_mmp_bh = bh;

        /*
         * Start a kernel thread to update the MMP block periodically.
         */
        EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, sb, "kmmpd-%.*s",
                                             (int)sizeof(mmp->mmp_bdevname),
                                             bdevname(bh->b_bdev,
                                                      mmp->mmp_bdevname));
        if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
                EXT4_SB(sb)->s_mmp_tsk = NULL;
                ext4_warning(sb, "Unable to create kmmpd thread for %s.",
                             sb->s_id);
                goto failed;
        }

        return 0;

failed:
        brelse(bh);
        return 1;
}