1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __X86_MCE_INTERNAL_H__
3 #define __X86_MCE_INTERNAL_H__
6 #define pr_fmt(fmt) "mce: " fmt
8 #include <linux/device.h>
13 MCE_DEFERRED_SEVERITY,
14 MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
23 extern struct blocking_notifier_head x86_mce_decoder_chain;
/*
 * Initial polling interval: 5 minutes, in seconds.  Parenthesized so the
 * macro expands safely inside larger expressions (e.g. division or
 * multiplication by the caller) — CERT PRE02-C.
 */
#define INITIAL_CHECK_INTERVAL	(5 * 60)
27 struct mce_evt_llist {
28 struct llist_node llnode;
/* Lockless MCE event pool helpers (declarations; implementations elsewhere). */
void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);
/*
 * Grade the severity of an MCE record.  NOTE(review): severity values are
 * presumably the MCE_*_SEVERITY enum declared earlier in this header —
 * confirm against the implementation.
 */
int mce_severity(struct mce *a, struct pt_regs *regs, int tolerant, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);
41 extern mce_banks_t mce_banks_ce_disabled;
#ifdef CONFIG_X86_MCE_INTEL
/* Intel-specific CMCI/LMCE support (real implementations elsewhere). */
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
/*
 * No-op stubs used when Intel CMCI/LMCE support is compiled out
 * (the corresponding #ifdef CONFIG_X86_MCE_INTEL branch declares the
 * real implementations).
 */
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
/* Dropped the stray ';' after the body — it was an empty extra declaration. */
static inline bool intel_filter_mce(struct mce *m) { return false; }
63 void mce_timer_kick(unsigned long interval);
#ifdef CONFIG_ACPI_APEI
/*
 * Persist and retrieve MCE records through ACPI APEI.  NOTE(review):
 * presumably backed by the APEI error record store — confirm against
 * the implementation.
 */
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
71 static inline int apei_write_mce(struct mce *m)
75 static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
79 static inline int apei_check_mce(void)
83 static inline int apei_clear_mce(u64 record_id)
90 * We consider records to be equivalent if bank+status+addr+misc all match.
91 * This is only used when the system is going down because of a fatal error
92 * to avoid cluttering the console log with essentially repeated information.
93 * In normal processing all errors seen are logged.
95 static inline bool mce_cmp(struct mce *m1, struct mce *m2)
97 return m1->bank != m2->bank ||
98 m1->status != m2->status ||
99 m1->addr != m2->addr ||
100 m1->misc != m2->misc;
103 extern struct device_attribute dev_attr_trigger;
#ifdef CONFIG_X86_MCELOG_LEGACY
/* Legacy mcelog support: trigger work and the injector notifier chain. */
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
/* No-op stubs when legacy mcelog is compiled out (matching #else not in view). */
static inline void mce_work_trigger(void) { }
static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
116 __u64 lmce_disabled : 1,
120 bios_cmci_threshold : 1,
121 /* Proper #MC exception handler is set */
137 extern struct mca_config mca_cfg;
138 DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
140 struct mce_vendor_flags {
142 * Indicates that overflow conditions are not fatal, when set.
144 __u64 overflow_recov : 1,
147 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
148 * Recovery. It indicates support for data poisoning in HW and deferred
154 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
155 * the register space for each MCA bank and also increases number of
156 * banks. Also, to accommodate the new banks and registers, the MCA
157 * register space is moved to a new MSR range.
161 /* AMD-style error thresholding banks present. */
164 /* Pentium, family 5-style MCA */
167 /* Centaur Winchip C6-style MCA */
170 /* SandyBridge IFU quirk */
176 extern struct mce_vendor_flags mce_flags;
185 u32 mca_msr_reg(int bank, enum mca_msr reg);
187 /* Decide whether to add MCE record to MCE event pool or filter it out. */
188 extern bool filter_mce(struct mce *m);
190 #ifdef CONFIG_X86_MCE_AMD
191 extern bool amd_filter_mce(struct mce *m);
/*
 * !CONFIG_X86_MCE_AMD stub: never filter.  The stray ';' after the closing
 * brace was removed — it was an empty extra declaration (warns with
 * -Wpedantic), matching the style of the other inline stubs in this header.
 */
static inline bool amd_filter_mce(struct mce *m) { return false; }
/* Exception-table fixup run when an RDMSR faults (per the handler's name). */
__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
				      struct pt_regs *regs, int trapnr,
				      unsigned long error_code,
				      unsigned long fault_addr);
/* Exception-table fixup run when a WRMSR faults (per the handler's name). */
__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
				      struct pt_regs *regs, int trapnr,
				      unsigned long error_code,
				      unsigned long fault_addr);
#ifdef CONFIG_X86_ANCIENT_MCE
/* Machine-check support for old CPUs: Intel Pentium (P5) and Centaur WinChip. */
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
noinstr void pentium_machine_check(struct pt_regs *regs);
noinstr void winchip_machine_check(struct pt_regs *regs);
/* Flags that P5-style MCE reporting is usable (mce_p5_enabled defined elsewhere). */
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
/* No-op stubs when ancient-MCE support is compiled out (matching #else not in view). */
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void enable_p5_mce(void) {}
static inline void pentium_machine_check(struct pt_regs *regs) {}
static inline void winchip_machine_check(struct pt_regs *regs) {}
220 #endif /* __X86_MCE_INTERNAL_H__ */