2 * Persistent Storage - platform driver interface parts.
4 * Copyright (C) 2007-2008 Google, Inc.
5 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #define pr_fmt(fmt) "pstore: " fmt
23 #include <linux/atomic.h>
24 #include <linux/types.h>
25 #include <linux/errno.h>
26 #include <linux/init.h>
27 #include <linux/kmsg_dump.h>
28 #include <linux/console.h>
29 #include <linux/module.h>
30 #include <linux/pstore.h>
31 #ifdef CONFIG_PSTORE_ZLIB_COMPRESS
32 #include <linux/zlib.h>
34 #ifdef CONFIG_PSTORE_LZO_COMPRESS
35 #include <linux/lzo.h>
37 #ifdef CONFIG_PSTORE_LZ4_COMPRESS
38 #include <linux/lz4.h>
40 #include <linux/string.h>
41 #include <linux/timer.h>
42 #include <linux/slab.h>
43 #include <linux/uaccess.h>
44 #include <linux/jiffies.h>
45 #include <linux/workqueue.h>
50 * We defer making "oops" entries appear in pstore - see
51 * whether the system is actually still running well enough
52 * to let someone see the entry
/*
 * Poll interval, in milliseconds, for the pstore update timer.
 * -1 (the default) disables runtime polling entirely; see the timer
 * re-arm check in pstore_timefunc(). Writable at runtime via
 * /sys/module/pstore/parameters/update_ms (mode 0600, root only).
 */
54 static int pstore_update_ms = -1;
55 module_param_named(update_ms, pstore_update_ms, int, 0600);
56 MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
57 "(default is -1, which means runtime updates are disabled; "
58 "enabling this option is not safe, it may lead to further "
59 "corruption on Oopses)");
/* Set to 1 by pstore_dump() when a new oops record lands; the timer
 * checks it and schedules pstore_work to re-scan the backend. */
61 static int pstore_new_entry;
/* Timer drives periodic record scans; real work is deferred to a
 * workqueue item because backend reads may sleep. */
63 static void pstore_timefunc(struct timer_list *);
64 static DEFINE_TIMER(pstore_timer, pstore_timefunc);
66 static void pstore_dowork(struct work_struct *);
67 static DECLARE_WORK(pstore_work, pstore_dowork);
70 * pstore_lock just protects "psinfo" during
71 * calls to pstore_register()
73 static DEFINE_SPINLOCK(pstore_lock);
/* The single currently-registered backend; NULL until pstore_register(). */
74 struct pstore_info *psinfo;
78 /* Compression parameters */
79 #ifdef CONFIG_PSTORE_ZLIB_COMPRESS
/* zlib window size of 2^12 keeps the workspace small for panic-time use.
 * NOTE(review): COMPR_LEVEL and MEM_LEVEL are used below but their
 * definitions are not visible in this chunk — confirm they are defined
 * alongside WINDOW_BITS. */
81 #define WINDOW_BITS 12
/* Shared zlib stream state; safe because compression only runs under
 * psinfo->buf_lock (see pstore_dump()). */
83 static struct z_stream_s stream;
/* Scratch workspace for the LZO/LZ4 compressors. */
85 static unsigned char *workspace;
/*
 * Ops vector abstracting over the compile-time-selected compression
 * backend (zlib, lzo, or lz4). compress/decompress return the produced
 * length on success. allocate() sizes and allocates big_oops_buf plus
 * any per-algorithm workspace.
 * NOTE(review): additional members (e.g. a name used by
 * allocate_buf_for_compression()) appear to be elided from this view.
 */
88 struct pstore_zbackend {
89 int (*compress)(const void *in, void *out, size_t inlen, size_t outlen);
90 int (*decompress)(void *in, void *out, size_t inlen, size_t outlen);
91 void (*allocate)(void);
/* Oversized staging buffer: raw (uncompressed) dump text is built here,
 * then compressed down into psinfo->buf. */
97 static char *big_oops_buf;
98 static size_t big_oops_buf_sz;
100 /* How much of the console log to snapshot */
101 unsigned long kmsg_bytes = PSTORE_DEFAULT_KMSG_BYTES;
/* Setter used by the filesystem mount option; body elided in this view —
 * presumably just assigns to kmsg_bytes. */
103 void pstore_set_kmsg_bytes(int bytes)
108 /* Tag each group of saved records with a sequence number */
109 static int oopscount;
/* Map a kmsg_dump_reason to a short human-readable tag used in the
 * record header (return statements elided from this view). */
111 static const char *get_reason_str(enum kmsg_dump_reason reason)
114 case KMSG_DUMP_PANIC:
118 case KMSG_DUMP_EMERG:
120 case KMSG_DUMP_RESTART:
124 case KMSG_DUMP_POWEROFF:
/*
 * Decide whether pstore_dump() may sleep/spin on buf_lock for this
 * dump reason. In NMI, panic, and emergency-restart paths blocking
 * could deadlock, so the caller falls back to spin_trylock_irqsave().
 */
131 bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
134 * In case of NMI path, pstore shouldn't be blocked
135 * regardless of reason.
141 /* In panic case, other cpus are stopped by smp_send_stop(). */
142 case KMSG_DUMP_PANIC:
143 /* Emergency restart shouldn't be blocked by spin lock. */
144 case KMSG_DUMP_EMERG:
150 EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
152 #ifdef CONFIG_PSTORE_ZLIB_COMPRESS
153 /* Derived from logfs_compress() */
/* Deflate inlen bytes from 'in' into 'out' (capacity outlen).
 * Returns the compressed length; error paths elided from this view. */
154 static int compress_zlib(const void *in, void *out, size_t inlen, size_t outlen)
159 err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
160 MEM_LEVEL, Z_DEFAULT_STRATEGY);
165 stream.avail_in = inlen;
167 stream.next_out = out;
168 stream.avail_out = outlen;
169 stream.total_out = 0;
/* Single-shot deflate: Z_FINISH must consume all input and end the
 * stream in one call, otherwise the compression is treated as failed. */
171 err = zlib_deflate(&stream, Z_FINISH);
172 if (err != Z_STREAM_END)
175 err = zlib_deflateEnd(&stream);
/* Compression that doesn't actually shrink the data is rejected. */
179 if (stream.total_out >= stream.total_in)
182 ret = stream.total_out;
187 /* Derived from logfs_uncompress */
/* Inverse of compress_zlib(); returns the decompressed length. */
188 static int decompress_zlib(void *in, void *out, size_t inlen, size_t outlen)
193 err = zlib_inflateInit2(&stream, WINDOW_BITS);
198 stream.avail_in = inlen;
200 stream.next_out = out;
201 stream.avail_out = outlen;
202 stream.total_out = 0;
204 err = zlib_inflate(&stream, Z_FINISH);
205 if (err != Z_STREAM_END)
208 err = zlib_inflateEnd(&stream);
212 ret = stream.total_out;
/*
 * Size big_oops_buf from an empirically-chosen compression ratio that
 * depends on the backend's record size (the switch arms with the actual
 * 'cmpr' percentages are elided from this view), then allocate the
 * staging buffer and zlib workspace. On workspace-allocation failure
 * compression is skipped rather than failing registration.
 */
217 static void allocate_zlib(void)
222 switch (psinfo->bufsize) {
223 /* buffer range for efivars */
233 /* buffer range for nvram, erst */
/* big_oops_buf_sz = bufsize / (cmpr/100): room for data that should
 * compress back down into psinfo->buf. */
242 big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr;
243 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
/* One workspace serves both directions: take the larger requirement. */
245 size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
246 zlib_inflate_workspacesize());
247 stream.workspace = kmalloc(size, GFP_KERNEL);
248 if (!stream.workspace) {
249 pr_err("No memory for compression workspace; skipping compression\n");
254 pr_err("No memory for uncompressed data; skipping compression\n");
255 stream.workspace = NULL;
/* Release the zlib workspace; NULLed so a later decompress attempt can
 * detect that no buffer exists. */
260 static void free_zlib(void)
262 kfree(stream.workspace)
263 stream.workspace = NULL;
269 static const struct pstore_zbackend backend_zlib = {
270 .compress = compress_zlib,
271 .decompress = decompress_zlib,
272 .allocate = allocate_zlib,
278 #ifdef CONFIG_PSTORE_LZO_COMPRESS
/* LZO1X compression into 'out'; lzo1x_1_compress() updates outlen to the
 * produced size (the return of outlen is elided from this view). */
279 static int compress_lzo(const void *in, void *out, size_t inlen, size_t outlen)
283 ret = lzo1x_1_compress(in, inlen, out, &outlen, workspace);
284 if (ret != LZO_E_OK) {
285 pr_err("lzo_compress error, ret = %d!\n", ret);
/* Safe (bounds-checked) LZO decompression — required since the stored
 * record may be corrupt. */
292 static int decompress_lzo(void *in, void *out, size_t inlen, size_t outlen)
296 ret = lzo1x_decompress_safe(in, inlen, out, &outlen);
297 if (ret != LZO_E_OK) {
298 pr_err("lzo_decompress error, ret = %d!\n", ret);
/* LZO can expand incompressible input, so the staging buffer is sized
 * to the documented worst case for bufsize bytes. */
305 static void allocate_lzo(void)
307 big_oops_buf_sz = lzo1x_worst_compress(psinfo->bufsize);
308 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
310 workspace = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
312 pr_err("No memory for compression workspace; skipping compression\n");
317 pr_err("No memory for uncompressed data; skipping compression\n");
/* Body elided; presumably frees workspace and big_oops_buf. */
322 static void free_lzo(void)
330 static const struct pstore_zbackend backend_lzo = {
331 .compress = compress_lzo,
332 .decompress = decompress_lzo,
333 .allocate = allocate_lzo,
339 #ifdef CONFIG_PSTORE_LZ4_COMPRESS
/* LZ4 compression; LZ4_compress_default() returns the compressed size,
 * or 0 on failure (destination too small). */
340 static int compress_lz4(const void *in, void *out, size_t inlen, size_t outlen)
344 ret = LZ4_compress_default(in, out, inlen, outlen, workspace);
346 pr_err("LZ4_compress_default error; compression failed!\n");
/* Safe LZ4 decompression — tolerates malformed stored data. */
353 static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen)
357 ret = LZ4_decompress_safe(in, out, inlen, outlen);
360 * LZ4_decompress_safe will return an error code
361 * (< 0) if decompression failed
363 pr_err("LZ4_decompress_safe error, ret = %d!\n", ret);
/* Staging buffer sized to LZ4's worst-case output for bufsize bytes. */
370 static void allocate_lz4(void)
372 big_oops_buf_sz = LZ4_compressBound(psinfo->bufsize);
373 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
375 workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
377 pr_err("No memory for compression workspace; skipping compression\n");
382 pr_err("No memory for uncompressed data; skipping compression\n");
/* Body elided; presumably frees workspace and big_oops_buf. */
387 static void free_lz4(void)
395 static const struct pstore_zbackend backend_lz4 = {
396 .compress = compress_lz4,
397 .decompress = decompress_lz4,
398 .allocate = allocate_lz4,
/*
 * Compile-time selection of the compression backend; at most one of the
 * three CONFIG_PSTORE_*_COMPRESS options binds here (the &backend_* and
 * NULL fallback initializers are elided from this view).
 */
404 static const struct pstore_zbackend *zbackend =
405 #if defined(CONFIG_PSTORE_ZLIB_COMPRESS)
407 #elif defined(CONFIG_PSTORE_LZO_COMPRESS)
409 #elif defined(CONFIG_PSTORE_LZ4_COMPRESS)
/* Thin dispatch through the selected backend's compress op. */
415 static int pstore_compress(const void *in, void *out,
416 size_t inlen, size_t outlen)
419 return zbackend->compress(in, out, inlen, outlen);
/* Thin dispatch through the selected backend's decompress op. */
424 static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
427 return zbackend->decompress(in, out, inlen, outlen);
/* Called from pstore_register(): set up big_oops_buf and any workspace
 * via the selected backend (NULL-zbackend guard elided from this view). */
432 static void allocate_buf_for_compression(void)
435 pr_info("using %s compression\n", zbackend->name);
436 zbackend->allocate();
438 pr_err("allocate compression buffer error!\n");
/* Called from pstore_unregister(): release the compression buffers. */
442 static void free_buf_for_compression(void)
447 pr_err("free compression buffer error!\n");
451 * Called when compression fails, since the printk buffer
452 * would be fetched for compression calling it again when
453 * compression fails would have moved the iterator of
454 * printk buffer which results in fetching old contents.
455 * Copy the recent messages from big_oops_buf to psinfo->buf
/* Returns the number of bytes placed in psinfo->buf (clamped to
 * psinfo->bufsize). hsize is the header length at the start of
 * big_oops_buf; len is the message payload length that follows it. */
457 static size_t copy_kmsg_to_buffer(int hsize, size_t len)
462 total_len = hsize + len;
464 if (total_len > psinfo->bufsize) {
/* Doesn't fit: keep the header intact and drop the OLDEST message
 * bytes — 'diff' skips past them so the tail (most recent) survives. */
465 diff = total_len - psinfo->bufsize + hsize;
466 memcpy(psinfo->buf, big_oops_buf, hsize);
467 memcpy(psinfo->buf + hsize, big_oops_buf + diff,
468 psinfo->bufsize - hsize);
469 total_len = psinfo->bufsize;
/* Fits entirely: plain copy. */
471 memcpy(psinfo->buf, big_oops_buf, total_len);
/*
 * Zero-initialize a pstore record and bind it to the given backend,
 * stamping it with the current wall-clock time.
 */
476 void pstore_record_init(struct pstore_record *record,
477 struct pstore_info *psinfo)
479 memset(record, 0, sizeof(*record));
481 record->psi = psinfo;
483 /* Report zeroed timestamp if called before timekeeping has resumed. */
484 record->time = ns_to_timespec(ktime_get_real_fast_ns());
488 * callback from kmsg_dump. (s2,l2) has the most recently
489 * written bytes, older bytes are in (s1,l1). Save as much
490 * as we can from the end of the buffer.
/* Entry point invoked by the kmsg_dump core on oops/panic/etc.: slice
 * the kernel log into numbered "Part" records, optionally compress each,
 * and hand them to the backend's write() until kmsg_bytes are saved. */
492 static void pstore_dump(struct kmsg_dumper *dumper,
493 enum kmsg_dump_reason reason)
495 unsigned long total = 0;
497 unsigned int part = 1;
498 unsigned long flags = 0;
502 why = get_reason_str(reason);
/* In atomic/NMI-like paths we must not spin waiting for buf_lock;
 * try once and carry on unlocked (with a warning) if contended. */
504 if (pstore_cannot_block_path(reason)) {
505 is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
507 pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
508 , in_nmi() ? "NMI" : why);
512 spin_lock_irqsave(&psinfo->buf_lock, flags);
/* One backend record per iteration until the kmsg_bytes budget is
 * spent or the log runs dry (the dry-run break is elided here). */
516 while (total < kmsg_bytes) {
522 struct pstore_record record;
524 pstore_record_init(&record, psinfo);
525 record.type = PSTORE_TYPE_DMESG;
526 record.count = oopscount;
527 record.reason = reason;
529 record.buf = psinfo->buf;
/* Stage into the big (uncompressed) buffer only when compression is
 * available AND we hold the lock that serializes access to it. */
531 if (big_oops_buf && is_locked) {
533 dst_size = big_oops_buf_sz;
536 dst_size = psinfo->bufsize;
539 /* Write dump header. */
540 header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
542 dst_size -= header_size;
544 /* Write dump contents. */
545 if (!kmsg_dump_get_buffer(dumper, true, dst + header_size,
546 dst_size, &dump_size))
/* Try to compress header+payload down into psinfo->buf. */
549 if (big_oops_buf && is_locked) {
550 zipped_len = pstore_compress(dst, psinfo->buf,
551 header_size + dump_size,
554 if (zipped_len > 0) {
555 record.compressed = true;
556 record.size = zipped_len;
/* Compression failed: salvage the most recent messages
 * uncompressed (see copy_kmsg_to_buffer()). */
558 record.size = copy_kmsg_to_buffer(header_size,
/* No compression path: data was written straight to psinfo->buf. */
562 record.size = header_size + dump_size;
565 ret = psinfo->write(&record);
/* Only successfully written OOPS records trigger the deferred
 * filesystem update (see pstore_timefunc/pstore_dowork). */
566 if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
567 pstore_new_entry = 1;
569 total += record.size;
/* NOTE(review): unlock is unconditional here, but the trylock above may
 * have failed (is_locked false) — the early lines that presumably guard
 * this are elided; confirm unlock is balanced in the full source. */
573 spin_unlock_irqrestore(&psinfo->buf_lock, flags);
/* Dumper descriptor hooking pstore_dump() into the kmsg_dump core
 * (the .dump initializer line is elided from this view). */
576 static struct kmsg_dumper pstore_dumper = {
581 * Register with kmsg_dump to save last part of console log on panic.
583 static void pstore_register_kmsg(void)
585 kmsg_dump_register(&pstore_dumper);
/* Tear-down counterpart, called from pstore_unregister(). */
588 static void pstore_unregister_kmsg(void)
590 kmsg_dump_unregister(&pstore_dumper);
593 #ifdef CONFIG_PSTORE_CONSOLE
/* Console frontend: persist every console chunk as a PSTORE_TYPE_CONSOLE
 * record. Chunks larger than the backend buffer are split (the loop over
 * 'e' and the chunk-size clamp are partly elided from this view). */
594 static void pstore_console_write(struct console *con, const char *s, unsigned c)
596 const char *e = s + c;
599 struct pstore_record record;
602 pstore_record_init(&record, psinfo);
603 record.type = PSTORE_TYPE_CONSOLE;
605 if (c > psinfo->bufsize)
/* During an oops the lock may be held by the dump path on this CPU;
 * trylock and drop the chunk rather than deadlock. */
608 if (oops_in_progress) {
609 if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
612 spin_lock_irqsave(&psinfo->buf_lock, flags);
/* Backend writes straight from the console string — no staging copy. */
614 record.buf = (char *)s;
616 psinfo->write(&record);
617 spin_unlock_irqrestore(&psinfo->buf_lock, flags);
623 static struct console pstore_console = {
625 .write = pstore_console_write,
626 .flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
630 static void pstore_register_console(void)
632 register_console(&pstore_console);
635 static void pstore_unregister_console(void)
637 unregister_console(&pstore_console);
/* !CONFIG_PSTORE_CONSOLE stubs. */
640 static void pstore_register_console(void) {}
641 static void pstore_unregister_console(void) {}
/*
 * Fallback ->write_user implementation for backends that only provide
 * ->write: copy the user buffer into a kernel duplicate, write it, then
 * free the copy (kfree and record->buf reset elided from this view).
 * Returns the record size on success or a negative errno.
 */
644 static int pstore_write_user_compat(struct pstore_record *record,
645 const char __user *buf)
/* memdup_user() bounds the copy to record->size and returns ERR_PTR
 * on fault or allocation failure. */
652 record->buf = memdup_user(buf, record->size);
653 if (IS_ERR(record->buf)) {
654 ret = PTR_ERR(record->buf);
658 ret = record->psi->write(record);
664 return unlikely(ret < 0) ? ret : record->size;
668 * platform specific persistent storage driver registers with
669 * us here. If pstore is already mounted, call the platform
670 * read function right away to populate the file system. If not
671 * then the pstore mount code will call us later to fill out
/* Register a backend: validate it, claim the psinfo slot under
 * pstore_lock, wire up compression and the enabled frontends, and
 * optionally start the update timer. Returns 0 on success (error
 * returns are elided from this view). */
674 int pstore_register(struct pstore_info *psi)
676 struct module *owner = psi->owner;
/* Honor the "backend=" module parameter: refuse any other driver. */
678 if (backend && strcmp(backend, psi->name)) {
679 pr_warn("ignoring unexpected backend '%s'\n", psi->name);
683 /* Sanity check flags. */
685 pr_warn("backend '%s' must support at least one frontend\n",
690 /* Check for required functions. */
691 if (!psi->read || !psi->write) {
692 pr_warn("backend '%s' must implement read() and write()\n",
/* Only one backend may be active at a time. */
697 spin_lock(&pstore_lock);
699 pr_warn("backend '%s' already loaded: ignoring '%s'\n",
700 psinfo->name, psi->name);
701 spin_unlock(&pstore_lock);
705 if (!psi->write_user)
706 psi->write_user = pstore_write_user_compat;
708 mutex_init(&psinfo->read_mutex);
709 spin_unlock(&pstore_lock);
/* Pin the backend module for as long as it is registered. */
711 if (owner && !try_module_get(owner)) {
716 allocate_buf_for_compression();
/* If the filesystem is already mounted, populate it immediately. */
718 if (pstore_is_mounted())
719 pstore_get_records(0);
721 if (psi->flags & PSTORE_FLAGS_DMESG)
722 pstore_register_kmsg();
723 if (psi->flags & PSTORE_FLAGS_CONSOLE)
724 pstore_register_console();
725 if (psi->flags & PSTORE_FLAGS_FTRACE)
726 pstore_register_ftrace();
727 if (psi->flags & PSTORE_FLAGS_PMSG)
728 pstore_register_pmsg();
730 /* Start watching for new records, if desired. */
731 if (pstore_update_ms >= 0) {
732 pstore_timer.expires = jiffies +
733 msecs_to_jiffies(pstore_update_ms);
734 add_timer(&pstore_timer);
738 * Update the module parameter backend, so it is visible
739 * through /sys/module/pstore/parameters/backend
743 pr_info("Registered %s as persistent store backend\n", psi->name);
749 EXPORT_SYMBOL_GPL(pstore_register);
/* Unregister a backend: quiesce the timer/work, detach frontends in
 * reverse order of registration, and release compression buffers
 * (psinfo reset and module_put are elided from this view).
 * NOTE(review): no visible check that psi == psinfo — confirm callers
 * only unregister the currently-registered backend. */
751 void pstore_unregister(struct pstore_info *psi)
753 /* Stop timer and make sure all work has finished. */
754 pstore_update_ms = -1;
755 del_timer_sync(&pstore_timer);
756 flush_work(&pstore_work);
758 if (psi->flags & PSTORE_FLAGS_PMSG)
759 pstore_unregister_pmsg();
760 if (psi->flags & PSTORE_FLAGS_FTRACE)
761 pstore_unregister_ftrace();
762 if (psi->flags & PSTORE_FLAGS_CONSOLE)
763 pstore_unregister_console();
764 if (psi->flags & PSTORE_FLAGS_DMESG)
765 pstore_unregister_kmsg();
767 free_buf_for_compression();
772 EXPORT_SYMBOL_GPL(pstore_unregister);
/* If the record was stored compressed, decompress it in place:
 * inflate into big_oops_buf, build a fresh kmalloc'd buffer holding
 * the plaintext plus the trailing ECC notice, and swap it in for
 * record->buf (the kfree of the old buffer is elided from this view). */
774 static void decompress_record(struct pstore_record *record)
779 if (!record->compressed)
782 /* Only PSTORE_TYPE_DMESG support compression. */
783 if (record->type != PSTORE_TYPE_DMESG) {
784 pr_warn("ignored compressed record type %d\n", record->type);
788 /* No compression method has created the common buffer. */
790 pr_warn("no decompression buffer allocated\n");
794 unzipped_len = pstore_decompress(record->buf, big_oops_buf,
795 record->size, big_oops_buf_sz);
796 if (unzipped_len <= 0) {
797 pr_err("decompression failed: %d\n", unzipped_len);
801 /* Build new buffer for decompressed contents. */
802 decompressed = kmalloc(unzipped_len + record->ecc_notice_size,
805 pr_err("decompression ran out of memory\n");
808 memcpy(decompressed, big_oops_buf, unzipped_len);
810 /* Append ECC notice to decompressed buffer. */
/* The ECC notice lives immediately after the compressed payload. */
811 memcpy(decompressed + unzipped_len, record->buf + record->size,
812 record->ecc_notice_size);
814 /* Swap out compressed contents with decompressed contents. */
816 record->buf = decompressed;
817 record->size = unzipped_len;
818 record->compressed = false;
822 * Read all the records from one persistent store backend. Create
823 * files in our filesystem. Don't warn about -EEXIST errors
824 * when we are re-scanning the backing store looking to add new
/* Iterate the backend's records under psi->read_mutex, decompressing
 * each and materializing it as a pstore filesystem file under 'root'.
 * 'quiet' suppresses -EEXIST warnings during re-scans. */
827 void pstore_get_backend_records(struct pstore_info *psi,
828 struct dentry *root, int quiet)
/* Hard cap on iterations guards against a backend whose read() never
 * terminates (warning printed below if the cap is hit). */
831 unsigned int stop_loop = 65536;
836 mutex_lock(&psi->read_mutex);
837 if (psi->open && psi->open(psi))
841 * Backend callback read() allocates record.buf. decompress_record()
842 * may reallocate record.buf. On success, pstore_mkfile() will keep
843 * the record.buf, so free it only on failure.
845 for (; stop_loop; stop_loop--) {
846 struct pstore_record *record;
849 record = kzalloc(sizeof(*record), GFP_KERNEL);
851 pr_err("out of memory creating record\n");
854 pstore_record_init(record, psi);
856 record->size = psi->read(record);
858 /* No more records left in backend? */
859 if (record->size <= 0) {
864 decompress_record(record);
865 rc = pstore_mkfile(root, record);
867 /* pstore_mkfile() did not take record, so free it. */
870 if (rc != -EEXIST || !quiet)
877 mutex_unlock(&psi->read_mutex);
880 pr_warn("failed to create %d record(s) from '%s'\n",
883 pr_err("looping? Too many records seen from '%s'\n",
/* Workqueue body: re-scan the backend for new records (quiet=1 so
 * already-present files don't warn). */
887 static void pstore_dowork(struct work_struct *work)
889 pstore_get_records(1);
/* Timer callback: if pstore_dump() flagged a new entry, defer the scan
 * to process context, then self-rearm while polling is enabled. */
892 static void pstore_timefunc(struct timer_list *unused)
894 if (pstore_new_entry) {
895 pstore_new_entry = 0;
896 schedule_work(&pstore_work);
/* update_ms may have been set to -1 by pstore_unregister(); in that
 * case the timer simply dies here instead of rearming. */
899 if (pstore_update_ms >= 0)
900 mod_timer(&pstore_timer,
901 jiffies + msecs_to_jiffies(pstore_update_ms));
/* Read-only "backend=" parameter: restricts registration to the named
 * driver (checked in pstore_register()). The 'backend' variable's
 * declaration is elided from this view. */
904 module_param(backend, charp, 0444);
905 MODULE_PARM_DESC(backend, "Pstore backend to use");