// SPDX-License-Identifier: GPL-2.0-only
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

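/*
 * Per-interrupt state for this domain. hw_irq_cfg is the part handed to
 * the overlying chips (IOAPIC, MSI) via irqd_cfg(); vector/cpu track the
 * current assignment, prev_vector/prev_cpu a not yet cleaned up move,
 * and clist links the entry into the old target CPU's cleanup list.
 */
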
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
	/*
	 * Used so that the set of online CPUs does not change during
	 * assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

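/*
 * Note: both lookups walk down to the bottom of the irqdomain hierarchy,
 * so callers stacked on top of the vector domain (IOAPIC, MSI) always
 * observe the vector/destination pair which is actually programmed into
 * the hardware.
 */
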
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}

static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector for this and the offlined target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

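/*
 * Inactive managed interrupts are parked on MANAGED_IRQ_SHUTDOWN_VECTOR
 * on the first online CPU, so the hardware always carries a valid vector
 * configuration even while no real vector is allocated.
 */
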
static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

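/*
 * Allocate a vector for @irqd from the matrix, preferring a CPU in
 * @dest. The current assignment is kept when its target CPU is online
 * and contained in @dest; a pending move or cleanup makes the attempt
 * fail with -EBUSY.
 */
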
static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);

	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node == NUMA_NO_NODE)
		goto all;
	/* Try the intersection of @affmsk and node mask */
	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the node mask */
	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
		return 0;
all:
	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;
	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

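/*
 * Allocation policy: managed interrupts get a vector reserved on every
 * CPU in their affinity mask, interrupts with an explicit target mask
 * are assigned right away, and everything else gets a cheap global
 * reservation which is replaced by a real vector at activation time.
 */
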
static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, dest, affmsk);

	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
					  &cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up move in progress */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

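/*
 * Deactivation hands the vector back to the matrix allocator: interrupts
 * in reservation mode fall back to a bare global reservation, managed
 * interrupts are parked on the shutdown vector.
 */
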
static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should
		 * happen before allocation time, but that would require
		 * completely convoluted ways of transporting that
		 * information.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}
	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered. Handle
	 * it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

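/*
 * Activation either hands out a real vector (reserved and managed
 * interrupts) or, for early activation (@reserve) and shutdown managed
 * interrupts, merely installs the shutdown vector placeholder.
 */
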
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	/* Nothing to do for fixed assigned vectors */
	if (!apicd->can_reserve && !apicd->is_managed)
		return 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

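/*
 * Domain allocation: set up one apic_chip_data per interrupt and run the
 * assignment policy. Legacy (PIC inherited) interrupts keep their fixed
 * ISA vector while activated; otherwise the vector is released and
 * reallocated like any other interrupt.
 */
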
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;

	/* A legacy PIC interrupt has a fixed vector on all CPUs */
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if the PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

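/*
 * Illustrative numbers (not from the source): with nr_cpu_ids = 2,
 * gsi_top = 16 and 16 legacy IRQs, the estimate is nr = (16 + 16) +
 * 8 * 2 = 48, plus another 8 * 2 = 16 for MSI since gsi_top <=
 * NR_IRQS_LEGACY, so nr_irqs would be capped at 64.
 */
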
void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign_system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, bring the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

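/*
 * The matrix allocator hands out per-CPU vectors in the range from
 * FIRST_EXTERNAL_VECTOR (0x20) up to, but not including,
 * FIRST_SYSTEM_VECTOR; everything above that range is owned by system
 * vectors and never given to devices.
 */
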
#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exceptions are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * cleared.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int err;

	/*
	 * Core code can call here for inactive interrupts. For inactive
	 * interrupts which use managed or reservation mode there is no
	 * point in going through the vector assignment right now as the
	 * activation will assign a vector which fits the destination
	 * cpumask. Let the core code store the destination mask and be
	 * done with it.
	 */
	if (!irqd_is_activated(irqd) &&
	    (apicd->is_managed || apicd->can_reserve))
		return IRQ_SET_MASK_OK;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity	NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

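/*
 * Retriggering sends a regular IPI with the currently assigned vector to
 * the current target CPU, so the interrupt is replayed through the
 * normal delivery path.
 */
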
void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * This should never happen. Managed interrupts are not
	 * migrated except on CPU down, which does not involve the
	 * cleanup vector. But try to keep the accounting correct
	 * anyway.
	 */
	WARN_ON_ONCE(managed);

	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

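/*
 * Second half of an interrupt move: once the first interrupt has arrived
 * on the new vector, the old target CPU receives IRQ_MOVE_CLEANUP_VECTOR
 * and releases the previous vector from its cleanup list here.
 */
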
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APIC's IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}

static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

/*
 * The low level entry code stores the one's complement of the vector
 * number in pt_regs->orig_ax, so complementing it again recovers the
 * vector which triggered the interrupt currently in progress.
 */
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent the actual interrupt move from running out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i * 0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

1101 pr_debug("... APIC ISR field:\n");
1102 print_APIC_field(APIC_ISR);
1103 pr_debug("... APIC TMR field:\n");
1104 print_APIC_field(APIC_TMR);
1105 pr_debug("... APIC IRR field:\n");
1106 print_APIC_field(APIC_IRR);
	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* Select the ISR via OCW3, read it, then switch back to the IRR */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);