/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
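
/*
 * With VHE the kernel runs at EL2, and code such as the KVM world
 * switch temporarily clears HCR_EL2.TGE. A non-maskable event (e.g.
 * an SDEI event or a GIC pseudo-NMI) taken in that window would run
 * without the EL2&0 translation regime the host relies on, so NMI
 * entry forces TGE back on and NMI exit restores the saved value.
 * nmi_ctx holds, per CPU, the saved HCR_EL2 and an NMI nesting count.
 */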
struct nmi_ctx {
	u64 hcr;
	unsigned int cnt;
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
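
/*
 * Only the outermost NMI on this CPU saves HCR_EL2 and sets TGE;
 * nested NMIs merely bump the count. Plain barrier()s suffice to
 * order the accesses here because nmi_ctx is strictly per-CPU and
 * is only raced against by NMIs on the same CPU.
 */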
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.	\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)
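
/*
 * Undo arch_nmi_enter(): drop the nesting count and, once the
 * outermost NMI unwinds, write back the HCR_EL2 value that was live
 * when it arrived, unless TGE was already set at that point and
 * nothing was modified.
 */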
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.	\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)
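
/*
 * Called by the core IRQ code for a bad/spurious interrupt; there is
 * nothing to acknowledge at controller level here, so just account
 * for it in the global error counter.
 */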
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;
	irq_err_count++;
}

#endif /* __ASM_HARDIRQ_H */