Merge branches 'for-next/sve-remove-pseudo-regs', 'for-next/backtrace-ipi', 'for...
[linux-2.6-microblaze.git] / arch / arm64 / kernel / smp.c
index 960b98b..af876a4 100644 (file)
@@ -32,7 +32,9 @@
 #include <linux/irq_work.h>
 #include <linux/kernel_stat.h>
 #include <linux/kexec.h>
+#include <linux/kgdb.h>
 #include <linux/kvm_host.h>
+#include <linux/nmi.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -72,13 +74,19 @@ enum ipi_msg_type {
        IPI_CPU_CRASH_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
-       IPI_WAKEUP,
-       NR_IPI
+       NR_IPI,
+       /*
+        * Any enum >= NR_IPI and < MAX_IPI is special and not traceable
+        * with trace_ipi_*
+        */
+       IPI_CPU_BACKTRACE = NR_IPI,
+       IPI_KGDB_ROUNDUP,
+       MAX_IPI
 };
 
-static int ipi_irq_base __read_mostly;
-static int nr_ipi __read_mostly = NR_IPI;
-static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+static int ipi_irq_base __ro_after_init;
+static int nr_ipi __ro_after_init = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
 
 static void ipi_setup(int cpu);
 
@@ -520,7 +528,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 {
        u64 hwid = processor->arm_mpidr;
 
-       if (!(processor->flags & ACPI_MADT_ENABLED)) {
+       if (!acpi_gicc_is_usable(processor)) {
                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                return;
        }
@@ -764,7 +772,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
        [IPI_CPU_CRASH_STOP]    = "CPU stop (for crash dump) interrupts",
        [IPI_TIMER]             = "Timer broadcast interrupts",
        [IPI_IRQ_WORK]          = "IRQ work interrupts",
-       [IPI_WAKEUP]            = "CPU wake-up interrupts",
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
@@ -797,13 +804,6 @@ void arch_send_call_function_single_ipi(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
-       smp_cross_call(mask, IPI_WAKEUP);
-}
-#endif
-
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -854,6 +854,38 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs
 #endif
 }
 
+/* Kick every CPU in @mask with the backtrace IPI (an NMI when supported). */
+static void arm64_backtrace_ipi(cpumask_t *mask)
+{
+       __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+}
+
+/*
+ * Request stack backtraces from the CPUs in @mask (optionally excluding
+ * @exclude_cpu) by handing arm64_backtrace_ipi() to the generic helper.
+ */
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+       /*
+        * NOTE: though nmi_trigger_cpumask_backtrace() has "nmi_" in the name,
+        * nothing about it truly needs to be implemented using an NMI, it's
+        * just that it's _allowed_ to work with NMIs. If ipi_should_be_nmi()
+        * returned false our backtrace attempt will just use a regular IPI.
+        */
+       nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_backtrace_ipi);
+}
+
+#ifdef CONFIG_KGDB
+/*
+ * Round up all other online CPUs into the debugger by sending each one an
+ * IPI_KGDB_ROUNDUP; per ipi_should_be_nmi() this is delivered as a
+ * pseudo-NMI when the platform supports it, a regular IPI otherwise.
+ */
+void kgdb_roundup_cpus(void)
+{
+       int this_cpu = raw_smp_processor_id();
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               /* No need to roundup ourselves */
+               if (cpu == this_cpu)
+                       continue;
+
+               __ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
+       }
+}
+#endif
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -897,13 +929,17 @@ static void do_handle_IPI(int ipinr)
                break;
 #endif
 
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-       case IPI_WAKEUP:
-               WARN_ONCE(!acpi_parking_protocol_valid(cpu),
-                         "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
-                         cpu);
+       case IPI_CPU_BACKTRACE:
+               /*
+                * NOTE: in some cases this _won't_ be NMI context. See the
+                * comment in arch_trigger_cpumask_backtrace().
+                */
+               nmi_cpu_backtrace(get_irq_regs());
+               break;
+
+       case IPI_KGDB_ROUNDUP:
+               kgdb_nmicallback(cpu, get_irq_regs());
                break;
-#endif
 
        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
@@ -926,6 +962,25 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
        __ipi_send_mask(ipi_desc[ipinr], target);
 }
 
+/*
+ * Should @ipi be requested and handled as a pseudo-NMI rather than a
+ * regular IRQ?  Only when IRQ priority masking is active and the irqchip
+ * advertises pseudo-NMI support, and only for the IPIs listed in the
+ * switch (stop, crash stop, backtrace, KGDB roundup).
+ */
+static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
+{
+       DECLARE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+
+       if (!system_uses_irq_prio_masking() ||
+           !static_branch_likely(&supports_pseudo_nmis))
+               return false;
+
+       switch (ipi) {
+       case IPI_CPU_STOP:
+       case IPI_CPU_CRASH_STOP:
+       case IPI_CPU_BACKTRACE:
+       case IPI_KGDB_ROUNDUP:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static void ipi_setup(int cpu)
 {
        int i;
@@ -933,8 +988,14 @@ static void ipi_setup(int cpu)
        if (WARN_ON_ONCE(!ipi_irq_base))
                return;
 
-       for (i = 0; i < nr_ipi; i++)
-               enable_percpu_irq(ipi_irq_base + i, 0);
+       for (i = 0; i < nr_ipi; i++) {
+               if (ipi_should_be_nmi(i)) {
+                       prepare_percpu_nmi(ipi_irq_base + i);
+                       enable_percpu_nmi(ipi_irq_base + i, 0);
+               } else {
+                       enable_percpu_irq(ipi_irq_base + i, 0);
+               }
+       }
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -945,8 +1006,14 @@ static void ipi_teardown(int cpu)
        if (WARN_ON_ONCE(!ipi_irq_base))
                return;
 
-       for (i = 0; i < nr_ipi; i++)
-               disable_percpu_irq(ipi_irq_base + i);
+       for (i = 0; i < nr_ipi; i++) {
+               if (ipi_should_be_nmi(i)) {
+                       disable_percpu_nmi(ipi_irq_base + i);
+                       teardown_percpu_nmi(ipi_irq_base + i);
+               } else {
+                       disable_percpu_irq(ipi_irq_base + i);
+               }
+       }
 }
 #endif
 
@@ -954,15 +1021,23 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 {
        int i;
 
-       WARN_ON(n < NR_IPI);
-       nr_ipi = min(n, NR_IPI);
+       WARN_ON(n < MAX_IPI);
+       nr_ipi = min(n, MAX_IPI);
 
        for (i = 0; i < nr_ipi; i++) {
                int err;
 
-               err = request_percpu_irq(ipi_base + i, ipi_handler,
-                                        "IPI", &cpu_number);
-               WARN_ON(err);
+               if (ipi_should_be_nmi(i)) {
+                       err = request_percpu_nmi(ipi_base + i, ipi_handler,
+                                                "IPI", &cpu_number);
+                       WARN(err, "Could not request IPI %d as NMI, err=%d\n",
+                            i, err);
+               } else {
+                       err = request_percpu_irq(ipi_base + i, ipi_handler,
+                                                "IPI", &cpu_number);
+                       WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
+                            i, err);
+               }
 
                ipi_desc[i] = irq_to_desc(ipi_base + i);
                irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
@@ -979,6 +1054,17 @@ void arch_smp_send_reschedule(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+/* Wake a CPU parked via the ACPI parking protocol. */
+void arch_send_wakeup_ipi(unsigned int cpu)
+{
+       /*
+        * We use a scheduler IPI to wake the CPU as this avoids the need for a
+        * dedicated IPI and we can safely handle spurious scheduler IPIs.
+        */
+       smp_send_reschedule(cpu);
+}
+#endif
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {