Merge tag 'edac_updates_for_v6.6' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-microblaze.git] / fs / pstore / platform.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Persistent Storage - platform driver interface parts.
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
7  */
8
9 #define pr_fmt(fmt) "pstore: " fmt
10
11 #include <linux/atomic.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/init.h>
15 #include <linux/kmsg_dump.h>
16 #include <linux/console.h>
17 #include <linux/mm.h>
18 #include <linux/module.h>
19 #include <linux/pstore.h>
20 #include <linux/string.h>
21 #include <linux/timer.h>
22 #include <linux/slab.h>
23 #include <linux/uaccess.h>
24 #include <linux/jiffies.h>
25 #include <linux/vmalloc.h>
26 #include <linux/workqueue.h>
27 #include <linux/zlib.h>
28
29 #include "internal.h"
30
/*
 * We defer making "oops" entries appear in pstore - see
 * whether the system is actually still running well enough
 * to let someone see the entry
 */
/* Timer poll interval in milliseconds; negative disables runtime updates. */
static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
		 "(default is -1, which means runtime updates are disabled; "
		 "enabling this option may not be safe; it may lead to further "
		 "corruption on Oopses)");
42
/*
 * Names should be in the same order as the enum pstore_type_id.
 * Indexed by pstore_type_to_name() and scanned by pstore_name_to_type();
 * ARRAY_SIZE must stay equal to PSTORE_TYPE_MAX (BUILD_BUG_ON enforces this).
 */
static const char * const pstore_type_names[] = {
	"dmesg",
	"mce",
	"console",
	"ftrace",
	"rtas",
	"powerpc-ofw",
	"powerpc-common",
	"pmsg",
	"powerpc-opal",
};
55
/* Set by pstore_dump() after a successful Oops write; consumed by the timer. */
static int pstore_new_entry;

/* Periodic timer that re-scans the backend for new records (see update_ms). */
static void pstore_timefunc(struct timer_list *);
static DEFINE_TIMER(pstore_timer, pstore_timefunc);

/* Deferred work so the backend scan runs in process context, not timer. */
static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);
63
/*
 * psinfo_lock protects "psinfo" during calls to
 * pstore_register(), pstore_unregister(), and
 * the filesystem mount/unmount routines.
 */
static DEFINE_MUTEX(psinfo_lock);
struct pstore_info *psinfo;

/* Preferred backend name; also updated to mirror the active backend. */
static char *backend;
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "specific backend to use");
75
/*
 * pstore no longer implements compression via the crypto API, and only
 * supports zlib deflate compression implemented using the zlib library
 * interface. This removes additional complexity which is hard to justify for a
 * diagnostic facility that has to operate in conditions where the system may
 * have become unstable. Zlib deflate is comparatively small in terms of code
 * size, and compresses ASCII text comparatively well. In terms of compression
 * speed, deflate is not the best performer but for recording the log output on
 * a kernel panic, this is not considered critical.
 *
 * The only remaining arguments supported by the compress= module parameter are
 * 'deflate' and 'none'. To retain compatibility with existing installations,
 * all other values are logged and replaced with 'deflate'.
 */
/* NULL after init means compression is disabled entirely. */
static char *compress = "deflate";
module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "compression to use");

/* How much of the kernel log to snapshot */
unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
module_param(kmsg_bytes, ulong, 0444);
MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
98
/* Scratch area for zlib deflate, sized by zlib_deflate_workspacesize(). */
static void *compress_workspace;

/* Staging buffer for uncompressed dumps; non-NULL means compression works. */
static char *big_oops_buf;

/* Adjust how much of the kernel log each dump captures (used by pstore fs). */
void pstore_set_kmsg_bytes(int bytes)
{
	kmsg_bytes = bytes;
}

/* Tag each group of saved records with a sequence number */
static int	oopscount;
110
111 const char *pstore_type_to_name(enum pstore_type_id type)
112 {
113         BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);
114
115         if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
116                 return "unknown";
117
118         return pstore_type_names[type];
119 }
120 EXPORT_SYMBOL_GPL(pstore_type_to_name);
121
122 enum pstore_type_id pstore_name_to_type(const char *name)
123 {
124         int i;
125
126         for (i = 0; i < PSTORE_TYPE_MAX; i++) {
127                 if (!strcmp(pstore_type_names[i], name))
128                         return i;
129         }
130
131         return PSTORE_TYPE_MAX;
132 }
133 EXPORT_SYMBOL_GPL(pstore_name_to_type);
134
135 static void pstore_timer_kick(void)
136 {
137         if (pstore_update_ms < 0)
138                 return;
139
140         mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
141 }
142
143 static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
144 {
145         /*
146          * In case of NMI path, pstore shouldn't be blocked
147          * regardless of reason.
148          */
149         if (in_nmi())
150                 return true;
151
152         switch (reason) {
153         /* In panic case, other cpus are stopped by smp_send_stop(). */
154         case KMSG_DUMP_PANIC:
155         /*
156          * Emergency restart shouldn't be blocked by spinning on
157          * pstore_info::buf_lock.
158          */
159         case KMSG_DUMP_EMERG:
160                 return true;
161         default:
162                 return false;
163         }
164 }
165
166 static int pstore_compress(const void *in, void *out,
167                            unsigned int inlen, unsigned int outlen)
168 {
169         struct z_stream_s zstream = {
170                 .next_in        = in,
171                 .avail_in       = inlen,
172                 .next_out       = out,
173                 .avail_out      = outlen,
174                 .workspace      = compress_workspace,
175         };
176         int ret;
177
178         if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
179                 return -EINVAL;
180
181         ret = zlib_deflateInit2(&zstream, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
182                                 -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
183         if (ret != Z_OK)
184                 return -EINVAL;
185
186         ret = zlib_deflate(&zstream, Z_FINISH);
187         if (ret != Z_STREAM_END)
188                 return -EINVAL;
189
190         ret = zlib_deflateEnd(&zstream);
191         if (ret != Z_OK)
192                 pr_warn_once("zlib_deflateEnd() failed: %d\n", ret);
193
194         return zstream.total_out;
195 }
196
197 static void allocate_buf_for_compression(void)
198 {
199         char *buf;
200
201         /* Skip if not built-in or compression disabled. */
202         if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !compress ||
203             !strcmp(compress, "none")) {
204                 compress = NULL;
205                 return;
206         }
207
208         if (strcmp(compress, "deflate")) {
209                 pr_err("Unsupported compression '%s', falling back to deflate\n",
210                        compress);
211                 compress = "deflate";
212         }
213
214         /*
215          * The compression buffer only needs to be as large as the maximum
216          * uncompressed record size, since any record that would be expanded by
217          * compression is just stored uncompressed.
218          */
219         buf = kvzalloc(psinfo->bufsize, GFP_KERNEL);
220         if (!buf) {
221                 pr_err("Failed %zu byte compression buffer allocation for: %s\n",
222                        psinfo->bufsize, compress);
223                 return;
224         }
225
226         compress_workspace =
227                 vmalloc(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL));
228         if (!compress_workspace) {
229                 pr_err("Failed to allocate zlib deflate workspace\n");
230                 kvfree(buf);
231                 return;
232         }
233
234         /* A non-NULL big_oops_buf indicates compression is available. */
235         big_oops_buf = buf;
236
237         pr_info("Using crash dump compression: %s\n", compress);
238 }
239
240 static void free_buf_for_compression(void)
241 {
242         if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress_workspace) {
243                 vfree(compress_workspace);
244                 compress_workspace = NULL;
245         }
246
247         kvfree(big_oops_buf);
248         big_oops_buf = NULL;
249 }
250
251 void pstore_record_init(struct pstore_record *record,
252                         struct pstore_info *psinfo)
253 {
254         memset(record, 0, sizeof(*record));
255
256         record->psi = psinfo;
257
258         /* Report zeroed timestamp if called before timekeeping has resumed. */
259         record->time = ns_to_timespec64(ktime_get_real_fast_ns());
260 }
261
/*
 * callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the
 * end of the buffer.  Records are emitted in bufsize-sized "parts", each
 * compressed when possible, via the backend's write() while buf_lock is held.
 */
static void pstore_dump(struct kmsg_dumper *dumper,
			enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	unsigned long	total = 0;
	const char	*why;
	unsigned int	part = 1;
	unsigned long	flags = 0;
	int		saved_ret = 0;
	int		ret;

	why = kmsg_dump_reason_str(reason);

	/*
	 * In contexts that must not block (NMI/panic/emergency-restart),
	 * only try the lock: if another dump holds it, skip this one
	 * rather than deadlock.
	 */
	if (pstore_cannot_block_path(reason)) {
		if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
			pr_err("dump skipped in %s path because of concurrent dump\n",
					in_nmi() ? "NMI" : why);
			return;
		}
	} else {
		spin_lock_irqsave(&psinfo->buf_lock, flags);
	}

	kmsg_dump_rewind(&iter);

	/* All parts of one dump share a single sequence number. */
	oopscount++;
	while (total < kmsg_bytes) {
		char *dst;
		size_t dst_size;
		int header_size;
		int zipped_len = -1;
		size_t dump_size;
		struct pstore_record record;

		pstore_record_init(&record, psinfo);
		record.type = PSTORE_TYPE_DMESG;
		record.count = oopscount;
		record.reason = reason;
		record.part = part;
		record.buf = psinfo->buf;

		/* Stage into big_oops_buf when compression is available. */
		dst = big_oops_buf ?: psinfo->buf;
		dst_size = psinfo->bufsize;

		/* Write dump header. */
		header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
				 oopscount, part);
		dst_size -= header_size;

		/* Write dump contents; stop when the log is exhausted. */
		if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
					  dst_size, &dump_size))
			break;

		if (big_oops_buf) {
			zipped_len = pstore_compress(dst, psinfo->buf,
						header_size + dump_size,
						psinfo->bufsize);

			if (zipped_len > 0) {
				record.compressed = true;
				record.size = zipped_len;
			} else {
				/* Compression failed: store uncompressed. */
				record.size = header_size + dump_size;
				memcpy(psinfo->buf, dst, record.size);
			}
		} else {
			record.size = header_size + dump_size;
		}

		ret = psinfo->write(&record);
		if (ret == 0 && reason == KMSG_DUMP_OOPS) {
			/* Ask the timer to surface the new record in pstorefs. */
			pstore_new_entry = 1;
			pstore_timer_kick();
		} else {
			/* Preserve only the first non-zero returned value. */
			if (!saved_ret)
				saved_ret = ret;
		}

		total += record.size;
		part++;
	}
	spin_unlock_irqrestore(&psinfo->buf_lock, flags);

	if (saved_ret) {
		pr_err_once("backend (%s) writing error (%d)\n", psinfo->name,
			    saved_ret);
	}
}
356
/* kmsg_dump hook; max_reason is filled in from the backend at register time. */
static struct kmsg_dumper pstore_dumper = {
	.dump = pstore_dump,
};
360
/*
 * Register with kmsg_dump to save last part of console log on panic.
 */
static void pstore_register_kmsg(void)
{
	kmsg_dump_register(&pstore_dumper);
}

/* Counterpart of pstore_register_kmsg(), called on backend removal. */
static void pstore_unregister_kmsg(void)
{
	kmsg_dump_unregister(&pstore_dumper);
}
373
#ifdef CONFIG_PSTORE_CONSOLE
/* Mirror console output into the backend as PSTORE_TYPE_CONSOLE records. */
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
	struct pstore_record record;

	/* Nothing to store for empty writes. */
	if (!c)
		return;

	pstore_record_init(&record, psinfo);
	record.type = PSTORE_TYPE_CONSOLE;

	/* The console buffer is only borrowed for the duration of write(). */
	record.buf = (char *)s;
	record.size = c;
	psinfo->write(&record);
}

static struct console pstore_console = {
	.write	= pstore_console_write,
	.index	= -1,
};

static void pstore_register_console(void)
{
	/* Show which backend is going to get console writes. */
	strscpy(pstore_console.name, psinfo->name,
		sizeof(pstore_console.name));
	/*
	 * Always initialize flags here since prior unregister_console()
	 * calls may have changed settings (specifically CON_ENABLED).
	 */
	pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME;
	register_console(&pstore_console);
}

static void pstore_unregister_console(void)
{
	unregister_console(&pstore_console);
}
#else
/* No-op stubs when the console frontend is not built. */
static void pstore_register_console(void) {}
static void pstore_unregister_console(void) {}
#endif
416
417 static int pstore_write_user_compat(struct pstore_record *record,
418                                     const char __user *buf)
419 {
420         int ret = 0;
421
422         if (record->buf)
423                 return -EINVAL;
424
425         record->buf = vmemdup_user(buf, record->size);
426         if (IS_ERR(record->buf)) {
427                 ret = PTR_ERR(record->buf);
428                 goto out;
429         }
430
431         ret = record->psi->write(record);
432
433         kvfree(record->buf);
434 out:
435         record->buf = NULL;
436
437         return unlikely(ret < 0) ? ret : record->size;
438 }
439
/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 */
int pstore_register(struct pstore_info *psi)
{
	/* Honor an explicit backend= preference from the command line. */
	if (backend && strcmp(backend, psi->name)) {
		pr_warn("backend '%s' already in use: ignoring '%s'\n",
			backend, psi->name);
		return -EBUSY;
	}

	/* Sanity check flags. */
	if (!psi->flags) {
		pr_warn("backend '%s' must support at least one frontend\n",
			psi->name);
		return -EINVAL;
	}

	/* Check for required functions. */
	if (!psi->read || !psi->write) {
		pr_warn("backend '%s' must implement read() and write()\n",
			psi->name);
		return -EINVAL;
	}

	/* Only one backend can be active at a time. */
	mutex_lock(&psinfo_lock);
	if (psinfo) {
		pr_warn("backend '%s' already loaded: ignoring '%s'\n",
			psinfo->name, psi->name);
		mutex_unlock(&psinfo_lock);
		return -EBUSY;
	}

	/* Provide a write_user() fallback for backends lacking one. */
	if (!psi->write_user)
		psi->write_user = pstore_write_user_compat;
	psinfo = psi;
	mutex_init(&psinfo->read_mutex);
	spin_lock_init(&psinfo->buf_lock);

	/* Compression is only used for PSTORE_TYPE_DMESG records. */
	if (psi->flags & PSTORE_FLAGS_DMESG)
		allocate_buf_for_compression();

	/* Pull any existing records into the filesystem right away. */
	pstore_get_records(0);

	/* Hook up each frontend the backend advertises support for. */
	if (psi->flags & PSTORE_FLAGS_DMESG) {
		pstore_dumper.max_reason = psinfo->max_reason;
		pstore_register_kmsg();
	}
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_register_console();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_register_ftrace();
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_register_pmsg();

	/* Start watching for new records, if desired. */
	pstore_timer_kick();

	/*
	 * Update the module parameter backend, so it is visible
	 * through /sys/module/pstore/parameters/backend
	 */
	backend = kstrdup(psi->name, GFP_KERNEL);

	pr_info("Registered %s as persistent store backend\n", psi->name);

	mutex_unlock(&psinfo_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);
514
/*
 * Tear down the currently registered backend: detach every frontend,
 * quiesce the timer/work, drop its records from pstorefs, and release
 * the compression buffers.  Mirrors pstore_register() in reverse order.
 */
void pstore_unregister(struct pstore_info *psi)
{
	/* It's okay to unregister nothing. */
	if (!psi)
		return;

	mutex_lock(&psinfo_lock);

	/* Only one backend can be registered at a time. */
	if (WARN_ON(psi != psinfo)) {
		mutex_unlock(&psinfo_lock);
		return;
	}

	/* Unregister all callbacks. */
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_unregister_pmsg();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_unregister_ftrace();
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_unregister_console();
	if (psi->flags & PSTORE_FLAGS_DMESG)
		pstore_unregister_kmsg();

	/* Stop timer and make sure all work has finished. */
	del_timer_sync(&pstore_timer);
	flush_work(&pstore_work);

	/* Remove all backend records from filesystem tree. */
	pstore_put_backend_records(psi);

	free_buf_for_compression();

	/* Clear the globals so a new backend may register later. */
	psinfo = NULL;
	kfree(backend);
	backend = NULL;

	pr_info("Unregistered %s as persistent store backend\n", psi->name);
	mutex_unlock(&psinfo_lock);
}
EXPORT_SYMBOL_GPL(pstore_unregister);
556
/*
 * If @record holds a compressed dmesg payload, inflate it in place:
 * record->buf is swapped for a freshly allocated decompressed buffer
 * (with the trailing ECC notice re-appended) and the old buffer is
 * freed.  On any failure the record is left untouched.
 */
static void decompress_record(struct pstore_record *record,
			      struct z_stream_s *zstream)
{
	int ret;
	int unzipped_len;
	char *unzipped, *workspace;

	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
		return;

	/* Only PSTORE_TYPE_DMESG records support compression. */
	if (record->type != PSTORE_TYPE_DMESG) {
		pr_warn("ignored compressed record type %d\n", record->type);
		return;
	}

	/* Missing compression buffer means compression was not initialized. */
	if (!zstream->workspace) {
		pr_warn("no decompression method initialized!\n");
		return;
	}

	/* The caller's zstream is reused across records; reset it first. */
	ret = zlib_inflateReset(zstream);
	if (ret != Z_OK) {
		pr_err("zlib_inflateReset() failed, ret = %d!\n", ret);
		return;
	}

	/* Allocate enough space to hold max decompression and ECC. */
	workspace = kvzalloc(psinfo->bufsize + record->ecc_notice_size,
			     GFP_KERNEL);
	if (!workspace)
		return;

	zstream->next_in	= record->buf;
	zstream->avail_in	= record->size;
	zstream->next_out	= workspace;
	zstream->avail_out	= psinfo->bufsize;

	ret = zlib_inflate(zstream, Z_FINISH);
	if (ret != Z_STREAM_END) {
		pr_err("zlib_inflate() failed, ret = %d!\n", ret);
		kvfree(workspace);
		return;
	}

	unzipped_len = zstream->total_out;

	/* Append ECC notice to decompressed buffer. */
	memcpy(workspace + unzipped_len, record->buf + record->size,
	       record->ecc_notice_size);

	/* Copy decompressed contents into a minimum-sized allocation. */
	unzipped = kvmemdup(workspace, unzipped_len + record->ecc_notice_size,
			    GFP_KERNEL);
	kvfree(workspace);
	if (!unzipped)
		return;

	/* Swap out compressed contents with decompressed contents. */
	kvfree(record->buf);
	record->buf = unzipped;
	/* Note: record->size excludes the appended ECC notice bytes. */
	record->size = unzipped_len;
	record->compressed = false;
}
622
/*
 * Read all the records from one persistent store backend. Create
 * files in our filesystem.  Don't warn about -EEXIST errors
 * when we are re-scanning the backing store looking to add new
 * error records.
 */
void pstore_get_backend_records(struct pstore_info *psi,
				struct dentry *root, int quiet)
{
	int failed = 0;
	/* Hard cap on iterations, in case a backend read() never terminates. */
	unsigned int stop_loop = 65536;
	struct z_stream_s zstream = {};

	if (!psi || !root)
		return;

	/* One inflate stream is shared by every record in this scan. */
	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress) {
		zstream.workspace = kvmalloc(zlib_inflate_workspacesize(),
					     GFP_KERNEL);
		zlib_inflateInit2(&zstream, -DEF_WBITS);
	}

	mutex_lock(&psi->read_mutex);
	if (psi->open && psi->open(psi))
		goto out;

	/*
	 * Backend callback read() allocates record.buf. decompress_record()
	 * may reallocate record.buf. On success, pstore_mkfile() will keep
	 * the record.buf, so free it only on failure.
	 */
	for (; stop_loop; stop_loop--) {
		struct pstore_record *record;
		int rc;

		record = kzalloc(sizeof(*record), GFP_KERNEL);
		if (!record) {
			pr_err("out of memory creating record\n");
			break;
		}
		pstore_record_init(record, psi);

		record->size = psi->read(record);

		/* No more records left in backend? */
		if (record->size <= 0) {
			kfree(record);
			break;
		}

		decompress_record(record, &zstream);
		rc = pstore_mkfile(root, record);
		if (rc) {
			/* pstore_mkfile() did not take record, so free it. */
			kvfree(record->buf);
			kfree(record->priv);
			kfree(record);
			/* -EEXIST during a quiet re-scan is expected. */
			if (rc != -EEXIST || !quiet)
				failed++;
		}
	}
	if (psi->close)
		psi->close(psi);
out:
	mutex_unlock(&psi->read_mutex);

	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress) {
		if (zlib_inflateEnd(&zstream) != Z_OK)
			pr_warn("zlib_inflateEnd() failed\n");
		kvfree(zstream.workspace);
	}

	if (failed)
		pr_warn("failed to create %d record(s) from '%s'\n",
			failed, psi->name);
	if (!stop_loop)
		pr_err("looping? Too many records seen from '%s'\n",
			psi->name);
}
702
/* Workqueue body: re-scan the backend for records written since mount. */
static void pstore_dowork(struct work_struct *work)
{
	pstore_get_records(1);
}
707
/*
 * Periodic timer: if a dump flagged new records, hand the scan off to
 * process context via pstore_work, then re-arm for the next interval.
 */
static void pstore_timefunc(struct timer_list *unused)
{
	if (pstore_new_entry) {
		pstore_new_entry = 0;
		schedule_work(&pstore_work);
	}

	pstore_timer_kick();
}
717
static int __init pstore_init(void)
{
	int ret;

	ret = pstore_init_fs();
	/* A backend may already have allocated compression buffers; drop them. */
	if (ret)
		free_buf_for_compression();

	return ret;
}
/* Late so that backends registering from earlier initcalls are visible. */
late_initcall(pstore_init);
729
/* Module teardown: only the filesystem needs explicit cleanup here. */
static void __exit pstore_exit(void)
{
	pstore_exit_fs();
}
module_exit(pstore_exit)

MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
MODULE_LICENSE("GPL");