// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 */

#define pr_fmt(fmt) "pstore: " fmt
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
#include <linux/lzo.h>
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
#include <linux/lz4.h>
#endif
#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
#include <linux/zstd.h>
#endif
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#include <crypto/acompress.h>
42 * We defer making "oops" entries appear in pstore - see
43 * whether the system is actually still running well enough
44 * to let someone see the entry
46 static int pstore_update_ms = -1;
47 module_param_named(update_ms, pstore_update_ms, int, 0600);
48 MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
49 "(default is -1, which means runtime updates are disabled; "
50 "enabling this option may not be safe; it may lead to further "
51 "corruption on Oopses)");
/* Names should be in the same order as the enum pstore_type_id */
static const char * const pstore_type_names[] = {
	"dmesg",
	"console",
	"ftrace",
	"rtas",
	"powerpc-ofw",
	"powerpc-common",
	"pmsg",
	"powerpc-opal",
};
/* Set to 1 by pstore_dump() when a fresh record should be picked up. */
static int pstore_new_entry;

/* Periodic timer that schedules pstore_work when new entries appear. */
static void pstore_timefunc(struct timer_list *);
static DEFINE_TIMER(pstore_timer, pstore_timefunc);

/* Deferred work that rescans the backend for new records. */
static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);
75 * psinfo_lock protects "psinfo" during calls to
76 * pstore_register(), pstore_unregister(), and
77 * the filesystem mount/unmount routines.
79 static DEFINE_MUTEX(psinfo_lock);
80 struct pstore_info *psinfo;
83 module_param(backend, charp, 0444);
84 MODULE_PARM_DESC(backend, "specific backend to use");
86 static char *compress =
87 #ifdef CONFIG_PSTORE_COMPRESS_DEFAULT
88 CONFIG_PSTORE_COMPRESS_DEFAULT;
92 module_param(compress, charp, 0444);
93 MODULE_PARM_DESC(compress, "compression to use");
95 /* Compression parameters */
96 static struct crypto_acomp *tfm;
97 static struct acomp_req *creq;
99 struct pstore_zbackend {
100 int (*zbufsize)(size_t size);
104 static char *big_oops_buf;
105 static size_t big_oops_buf_sz;
107 /* How much of the console log to snapshot */
108 unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
110 void pstore_set_kmsg_bytes(int bytes)
115 /* Tag each group of saved records with a sequence number */
116 static int oopscount;
118 const char *pstore_type_to_name(enum pstore_type_id type)
120 BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);
122 if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
125 return pstore_type_names[type];
127 EXPORT_SYMBOL_GPL(pstore_type_to_name);
129 enum pstore_type_id pstore_name_to_type(const char *name)
133 for (i = 0; i < PSTORE_TYPE_MAX; i++) {
134 if (!strcmp(pstore_type_names[i], name))
138 return PSTORE_TYPE_MAX;
140 EXPORT_SYMBOL_GPL(pstore_name_to_type);
142 static void pstore_timer_kick(void)
144 if (pstore_update_ms < 0)
147 mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
150 static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
153 * In case of NMI path, pstore shouldn't be blocked
154 * regardless of reason.
160 /* In panic case, other cpus are stopped by smp_send_stop(). */
161 case KMSG_DUMP_PANIC:
163 * Emergency restart shouldn't be blocked by spinning on
164 * pstore_info::buf_lock.
166 case KMSG_DUMP_EMERG:
#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
/*
 * Worst-case buffer size for deflate, using empirically measured
 * compression ratios (cmpr = percent of original size remaining).
 */
static int zbufsize_deflate(size_t size)
{
	size_t cmpr;

	switch (size) {
	/* buffer range for efivars */
	case 1000 ... 2000:
		cmpr = 56;
		break;
	case 2001 ... 3000:
		cmpr = 54;
		break;
	case 3001 ... 3999:
		cmpr = 52;
		break;
	/* buffer range for nvram, erst */
	case 4000 ... 10000:
		cmpr = 45;
		break;
	default:
		cmpr = 60;
		break;
	}

	return (size * 100) / cmpr;
}
#endif
202 #if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
203 static int zbufsize_lzo(size_t size)
205 return lzo1x_worst_compress(size);
209 #if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
210 static int zbufsize_lz4(size_t size)
212 return LZ4_compressBound(size);
#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
/* 842 ("nx-842") never needs an output buffer larger than the input. */
static int zbufsize_842(size_t size)
{
	return size;
}
#endif
223 #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
224 static int zbufsize_zstd(size_t size)
226 return zstd_compress_bound(size);
230 static const struct pstore_zbackend *zbackend __ro_after_init;
232 static const struct pstore_zbackend zbackends[] = {
233 #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
235 .zbufsize = zbufsize_deflate,
239 #if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
241 .zbufsize = zbufsize_lzo,
245 #if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS)
247 .zbufsize = zbufsize_lz4,
251 #if IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
253 .zbufsize = zbufsize_lz4,
257 #if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
259 .zbufsize = zbufsize_842,
263 #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
265 .zbufsize = zbufsize_zstd,
272 static int pstore_compress(const void *in, void *out,
273 unsigned int inlen, unsigned int outlen)
275 struct scatterlist src, dst;
278 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
281 sg_init_table(&src, 1);
282 sg_set_buf(&src, in, inlen);
284 sg_init_table(&dst, 1);
285 sg_set_buf(&dst, out, outlen);
287 acomp_request_set_params(creq, &src, &dst, inlen, outlen);
289 ret = crypto_acomp_compress(creq);
291 pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
298 static void allocate_buf_for_compression(void)
300 struct crypto_acomp *acomp;
304 /* Skip if not built-in or compression backend not selected yet. */
305 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
308 /* Skip if no pstore backend yet or compression init already done. */
312 if (!crypto_has_acomp(zbackend->name, 0, CRYPTO_ALG_ASYNC)) {
313 pr_err("Unknown compression: %s\n", zbackend->name);
317 size = zbackend->zbufsize(psinfo->bufsize);
319 pr_err("Invalid compression size for %s: %d\n",
320 zbackend->name, size);
324 buf = kmalloc(size, GFP_KERNEL);
326 pr_err("Failed %d byte compression buffer allocation for: %s\n",
327 size, zbackend->name);
331 acomp = crypto_alloc_acomp(zbackend->name, 0, CRYPTO_ALG_ASYNC);
332 if (IS_ERR_OR_NULL(acomp)) {
334 pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
339 creq = acomp_request_alloc(acomp);
341 crypto_free_acomp(acomp);
343 pr_err("acomp_request_alloc('%s') failed\n", zbackend->name);
347 /* A non-NULL big_oops_buf indicates compression is available. */
349 big_oops_buf_sz = size;
352 pr_info("Using crash dump compression: %s\n", zbackend->name);
355 static void free_buf_for_compression(void)
357 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
358 acomp_request_free(creq);
359 crypto_free_acomp(tfm);
368 * Called when compression fails, since the printk buffer
369 * would be fetched for compression calling it again when
370 * compression fails would have moved the iterator of
371 * printk buffer which results in fetching old contents.
372 * Copy the recent messages from big_oops_buf to psinfo->buf
374 static size_t copy_kmsg_to_buffer(int hsize, size_t len)
379 total_len = hsize + len;
381 if (total_len > psinfo->bufsize) {
382 diff = total_len - psinfo->bufsize + hsize;
383 memcpy(psinfo->buf, big_oops_buf, hsize);
384 memcpy(psinfo->buf + hsize, big_oops_buf + diff,
385 psinfo->bufsize - hsize);
386 total_len = psinfo->bufsize;
388 memcpy(psinfo->buf, big_oops_buf, total_len);
393 void pstore_record_init(struct pstore_record *record,
394 struct pstore_info *psinfo)
396 memset(record, 0, sizeof(*record));
398 record->psi = psinfo;
400 /* Report zeroed timestamp if called before timekeeping has resumed. */
401 record->time = ns_to_timespec64(ktime_get_real_fast_ns());
405 * callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the
408 static void pstore_dump(struct kmsg_dumper *dumper,
409 enum kmsg_dump_reason reason)
411 struct kmsg_dump_iter iter;
412 unsigned long total = 0;
414 unsigned int part = 1;
415 unsigned long flags = 0;
418 why = kmsg_dump_reason_str(reason);
420 if (pstore_cannot_block_path(reason)) {
421 if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
422 pr_err("dump skipped in %s path because of concurrent dump\n",
423 in_nmi() ? "NMI" : why);
427 spin_lock_irqsave(&psinfo->buf_lock, flags);
430 kmsg_dump_rewind(&iter);
433 while (total < kmsg_bytes) {
439 struct pstore_record record;
441 pstore_record_init(&record, psinfo);
442 record.type = PSTORE_TYPE_DMESG;
443 record.count = oopscount;
444 record.reason = reason;
446 record.buf = psinfo->buf;
450 dst_size = big_oops_buf_sz;
453 dst_size = psinfo->bufsize;
456 /* Write dump header. */
457 header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
459 dst_size -= header_size;
461 /* Write dump contents. */
462 if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
463 dst_size, &dump_size))
467 zipped_len = pstore_compress(dst, psinfo->buf,
468 header_size + dump_size,
471 if (zipped_len > 0) {
472 record.compressed = true;
473 record.size = zipped_len;
475 record.size = copy_kmsg_to_buffer(header_size,
479 record.size = header_size + dump_size;
482 ret = psinfo->write(&record);
483 if (ret == 0 && reason == KMSG_DUMP_OOPS) {
484 pstore_new_entry = 1;
488 total += record.size;
491 spin_unlock_irqrestore(&psinfo->buf_lock, flags);
494 static struct kmsg_dumper pstore_dumper = {
499 * Register with kmsg_dump to save last part of console log on panic.
501 static void pstore_register_kmsg(void)
503 kmsg_dump_register(&pstore_dumper);
506 static void pstore_unregister_kmsg(void)
508 kmsg_dump_unregister(&pstore_dumper);
#ifdef CONFIG_PSTORE_CONSOLE
/* Console frontend: persist every console write as a CONSOLE record. */
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
	struct pstore_record record;

	if (!c)
		return;

	pstore_record_init(&record, psinfo);
	record.type = PSTORE_TYPE_CONSOLE;

	record.buf = (char *)s;
	record.size = c;
	psinfo->write(&record);
}

static struct console pstore_console = {
	.write	= pstore_console_write,
	.index	= -1,
};

static void pstore_register_console(void)
{
	/* Show which backend is going to get console writes. */
	strscpy(pstore_console.name, psinfo->name,
		sizeof(pstore_console.name));
	/*
	 * Always initialize flags here since prior unregister_console()
	 * calls may have changed settings (specifically CON_ENABLED).
	 */
	pstore_console.flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME;
	register_console(&pstore_console);
}

static void pstore_unregister_console(void)
{
	unregister_console(&pstore_console);
}
#else
static void pstore_register_console(void) {}
static void pstore_unregister_console(void) {}
#endif
554 static int pstore_write_user_compat(struct pstore_record *record,
555 const char __user *buf)
562 record->buf = memdup_user(buf, record->size);
563 if (IS_ERR(record->buf)) {
564 ret = PTR_ERR(record->buf);
568 ret = record->psi->write(record);
574 return unlikely(ret < 0) ? ret : record->size;
578 * platform specific persistent storage driver registers with
579 * us here. If pstore is already mounted, call the platform
580 * read function right away to populate the file system. If not
581 * then the pstore mount code will call us later to fill out
584 int pstore_register(struct pstore_info *psi)
586 if (backend && strcmp(backend, psi->name)) {
587 pr_warn("ignoring unexpected backend '%s'\n", psi->name);
591 /* Sanity check flags. */
593 pr_warn("backend '%s' must support at least one frontend\n",
598 /* Check for required functions. */
599 if (!psi->read || !psi->write) {
600 pr_warn("backend '%s' must implement read() and write()\n",
605 mutex_lock(&psinfo_lock);
607 pr_warn("backend '%s' already loaded: ignoring '%s'\n",
608 psinfo->name, psi->name);
609 mutex_unlock(&psinfo_lock);
613 if (!psi->write_user)
614 psi->write_user = pstore_write_user_compat;
616 mutex_init(&psinfo->read_mutex);
617 spin_lock_init(&psinfo->buf_lock);
619 if (psi->flags & PSTORE_FLAGS_DMESG)
620 allocate_buf_for_compression();
622 pstore_get_records(0);
624 if (psi->flags & PSTORE_FLAGS_DMESG) {
625 pstore_dumper.max_reason = psinfo->max_reason;
626 pstore_register_kmsg();
628 if (psi->flags & PSTORE_FLAGS_CONSOLE)
629 pstore_register_console();
630 if (psi->flags & PSTORE_FLAGS_FTRACE)
631 pstore_register_ftrace();
632 if (psi->flags & PSTORE_FLAGS_PMSG)
633 pstore_register_pmsg();
635 /* Start watching for new records, if desired. */
639 * Update the module parameter backend, so it is visible
640 * through /sys/module/pstore/parameters/backend
642 backend = kstrdup(psi->name, GFP_KERNEL);
644 pr_info("Registered %s as persistent store backend\n", psi->name);
646 mutex_unlock(&psinfo_lock);
649 EXPORT_SYMBOL_GPL(pstore_register);
651 void pstore_unregister(struct pstore_info *psi)
653 /* It's okay to unregister nothing. */
657 mutex_lock(&psinfo_lock);
659 /* Only one backend can be registered at a time. */
660 if (WARN_ON(psi != psinfo)) {
661 mutex_unlock(&psinfo_lock);
665 /* Unregister all callbacks. */
666 if (psi->flags & PSTORE_FLAGS_PMSG)
667 pstore_unregister_pmsg();
668 if (psi->flags & PSTORE_FLAGS_FTRACE)
669 pstore_unregister_ftrace();
670 if (psi->flags & PSTORE_FLAGS_CONSOLE)
671 pstore_unregister_console();
672 if (psi->flags & PSTORE_FLAGS_DMESG)
673 pstore_unregister_kmsg();
675 /* Stop timer and make sure all work has finished. */
676 del_timer_sync(&pstore_timer);
677 flush_work(&pstore_work);
679 /* Remove all backend records from filesystem tree. */
680 pstore_put_backend_records(psi);
682 free_buf_for_compression();
687 mutex_unlock(&psinfo_lock);
689 EXPORT_SYMBOL_GPL(pstore_unregister);
691 static void decompress_record(struct pstore_record *record)
695 char *unzipped, *workspace;
696 struct acomp_req *dreq;
697 struct scatterlist src, dst;
699 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
702 /* Only PSTORE_TYPE_DMESG support compression. */
703 if (record->type != PSTORE_TYPE_DMESG) {
704 pr_warn("ignored compressed record type %d\n", record->type);
708 /* Missing compression buffer means compression was not initialized. */
710 pr_warn("no decompression method initialized!\n");
714 /* Allocate enough space to hold max decompression and ECC. */
715 unzipped_len = big_oops_buf_sz;
716 workspace = kmalloc(unzipped_len + record->ecc_notice_size,
721 dreq = acomp_request_alloc(tfm);
727 sg_init_table(&src, 1);
728 sg_set_buf(&src, record->buf, record->size);
730 sg_init_table(&dst, 1);
731 sg_set_buf(&dst, workspace, unzipped_len);
733 acomp_request_set_params(dreq, &src, &dst, record->size, unzipped_len);
735 /* After decompression "unzipped_len" is almost certainly smaller. */
736 ret = crypto_acomp_decompress(dreq);
738 pr_err("crypto_acomp_decompress failed, ret = %d!\n", ret);
743 /* Append ECC notice to decompressed buffer. */
744 unzipped_len = dreq->dlen;
745 memcpy(workspace + unzipped_len, record->buf + record->size,
746 record->ecc_notice_size);
748 /* Copy decompressed contents into an minimum-sized allocation. */
749 unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
752 acomp_request_free(dreq);
756 /* Swap out compressed contents with decompressed contents. */
758 record->buf = unzipped;
759 record->size = unzipped_len;
760 record->compressed = false;
764 * Read all the records from one persistent store backend. Create
765 * files in our filesystem. Don't warn about -EEXIST errors
766 * when we are re-scanning the backing store looking to add new
769 void pstore_get_backend_records(struct pstore_info *psi,
770 struct dentry *root, int quiet)
773 unsigned int stop_loop = 65536;
778 mutex_lock(&psi->read_mutex);
779 if (psi->open && psi->open(psi))
783 * Backend callback read() allocates record.buf. decompress_record()
784 * may reallocate record.buf. On success, pstore_mkfile() will keep
785 * the record.buf, so free it only on failure.
787 for (; stop_loop; stop_loop--) {
788 struct pstore_record *record;
791 record = kzalloc(sizeof(*record), GFP_KERNEL);
793 pr_err("out of memory creating record\n");
796 pstore_record_init(record, psi);
798 record->size = psi->read(record);
800 /* No more records left in backend? */
801 if (record->size <= 0) {
806 decompress_record(record);
807 rc = pstore_mkfile(root, record);
809 /* pstore_mkfile() did not take record, so free it. */
813 if (rc != -EEXIST || !quiet)
820 mutex_unlock(&psi->read_mutex);
823 pr_warn("failed to create %d record(s) from '%s'\n",
826 pr_err("looping? Too many records seen from '%s'\n",
830 static void pstore_dowork(struct work_struct *work)
832 pstore_get_records(1);
835 static void pstore_timefunc(struct timer_list *unused)
837 if (pstore_new_entry) {
838 pstore_new_entry = 0;
839 schedule_work(&pstore_work);
845 static void __init pstore_choose_compression(void)
847 const struct pstore_zbackend *step;
852 for (step = zbackends; step->name; step++) {
853 if (!strcmp(compress, step->name)) {
860 static int __init pstore_init(void)
864 pstore_choose_compression();
867 * Check if any pstore backends registered earlier but did not
868 * initialize compression because crypto was not ready. If so,
869 * initialize compression now.
871 allocate_buf_for_compression();
873 ret = pstore_init_fs();
875 free_buf_for_compression();
879 late_initcall(pstore_init);
881 static void __exit pstore_exit(void)
885 module_exit(pstore_exit)
887 MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
888 MODULE_LICENSE("GPL");