/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
5 #ifndef __ASM_HARDIRQ_H
6 #define __ASM_HARDIRQ_H
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>
19 unsigned int __softirq_pending;
20 unsigned int ipi_irqs[NR_IPI];
21 } ____cacheline_aligned irq_cpustat_t;
23 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/* Bump / read one irq_cpustat_t field for @cpu via the standard mapping. */
#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
28 u64 smp_irq_stat_cpu(unsigned int cpu);
29 #define arch_irq_stat_cpu smp_irq_stat_cpu
31 #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
38 DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
/*
 * On NMI entry under VHE, force HCR_EL2.TGE so exceptions are taken to
 * the kernel's EL2 context, saving the previous HCR_EL2 for the
 * outermost NMI only. Nested NMIs just bump the count. No-op when the
 * kernel is not running in hyp mode.
 */
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		/* Nested NMI: outer level already saved HCR_EL2. */	\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)
/*
 * On NMI exit under VHE, drop one nesting level; the outermost exit
 * restores the HCR_EL2 value saved at entry (only if TGE was not
 * already set before the NMI). No-op outside hyp mode.
 */
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.	\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)
/*
 * Acknowledge a bad (spurious/unhandled) IRQ: there is nothing to do at
 * the hardware level on arm64, so just account it in the global error
 * counter. @irq is unused.
 */
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;

	irq_err_count++;
}
106 #endif /* __ASM_HARDIRQ_H */