// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

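/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * teardown path typically quiesces its device first, then synchronizes,
 * so that no handler can still be touching driver data afterwards. All
 * foo_* symbols below are made-up driver code:
 *
 *	foo_hw_mask_interrupts(foo);	// stop the device raising the irq
 *	synchronize_irq(foo->irq);	// wait out hardirq + threaded handlers
 *	foo_free_buffers(foo);		// now safe to tear down shared state
 *
 * This must not be called while holding a lock which the handler also
 * takes, as noted in the kernel-doc above.
 */
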
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}

static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask)
{
	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;

		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
	}
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly.
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_init_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask, force))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 * irq_update_affinity_desc - Update affinity management for an interrupt
 * @irq:	The interrupt number to update
 * @affinity:	Pointer to the affinity descriptor
 *
 * This interface can be used to configure the affinity management of
 * interrupts which have been allocated already.
 *
 * There are certain limitations on when it may be used - attempts to use it
 * for when the kernel is configured for generic IRQ reservation mode (in
 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
 * managed/non-managed interrupt accounting. In addition, attempts to use it on
 * an interrupt which is already started or which has already been configured
 * as managed will also fail, as these mean invalid init state or double init.
 */
int irq_update_affinity_desc(unsigned int irq,
			     struct irq_affinity_desc *affinity)
{
	struct irq_desc *desc;
	unsigned long flags;
	bool activated;
	int ret = 0;

	/*
	 * Supporting this with the reservation scheme used by x86 needs
	 * some more thought. Fail it for now.
	 */
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		return -EOPNOTSUPP;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	/* Requires the interrupt to be shut down */
	if (irqd_is_started(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Interrupts which are already managed cannot be modified */
	if (irqd_affinity_is_managed(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Deactivate the interrupt. That's required to undo
	 * anything an earlier activation has established.
	 */
	activated = irqd_is_activated(&desc->irq_data);
	if (activated)
		irq_domain_deactivate_irq(&desc->irq_data);

	if (affinity->is_managed) {
		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
	}

	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);

	/* Restore the activation state */
	if (activated)
		irq_domain_activate_irq(&desc->irq_data, false);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}

static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
			      bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
EXPORT_SYMBOL_GPL(irq_set_affinity);

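/*
 * Usage sketch (illustrative only, not part of this file): steering an
 * interrupt to a single CPU, e.g. to keep it close to the memory of a
 * processing thread. The irq number and CPU index are made-up examples:
 *
 *	ret = irq_set_affinity(irq, cpumask_of(2));
 *	if (ret)
 *		pr_warn("could not move irq %d: %d\n", irq, ret);
 */
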
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
EXPORT_SYMBOL_GPL(irq_force_affinity);

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

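/*
 * Usage sketch (illustrative only, not part of this file): a multiqueue
 * driver typically hints one CPU per queue vector so that user space
 * irqbalance daemons can honour the spreading. foo_* is made-up driver
 * code:
 *
 *	for (i = 0; i < foo->num_queues; i++)
 *		irq_set_affinity_hint(foo->queue_irq[i], cpumask_of(i));
 *
 * The hint must be cleared (pass NULL) before the irq is freed.
 */
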
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

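/*
 * Usage sketch (illustrative only, not part of this file): registering
 * for affinity change notifications. The caller embeds the notify block
 * in its own state; all foo_* names are made up:
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		// re-point per-cpu resources at the new target CPUs
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *notify =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		// last reference dropped, free the containing object
 *	}
 *
 *	foo->notify.notify = foo_notify;
 *	foo->notify.release = foo_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);	// before free_irq()
 */
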
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq:	interrupt number to set affinity
 * @vcpu_info:	vCPU specific data or pointer to a percpu array of vCPU
 *		specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

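/*
 * Usage sketch (illustrative only, not part of this file): the typical
 * caller is interrupt remapping code acting on behalf of KVM, handing
 * down an architecture specific vcpu_info cookie. The names below are
 * made up for the example:
 *
 *	ret = irq_set_vcpu_affinity(host_irq, &vcpu_pi_info);
 *	if (ret)
 *		; // fall back to normal, non-posted interrupt delivery
 */
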
void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

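/*
 * Usage sketch (illustrative only, not part of this file): disable and
 * enable calls nest, so one matching enable_irq() is required per
 * disable. foo_* is made-up driver code:
 *
 *	disable_irq(foo->irq);		// waits for running handlers
 *	foo_reprogram_hw(foo);		// handler cannot run here
 *	enable_irq(foo->irq);
 *
 * The handler must not be needed for the critical section to complete
 * (e.g. to clock out a bus transfer), or this deadlocks.
 */
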
/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

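/*
 * Usage sketch (illustrative only, not part of this file): optimistic
 * disable from atomic context, falling back when a threaded handler is
 * still running. foo_* is made-up driver code:
 *
 *	if (disable_hardirq(foo->irq))
 *		foo_poll_device(foo);	// safe: no hardirq handler running
 *	else
 *		foo_defer_poll(foo);	// threaded handler still active
 *	enable_irq(foo->irq);
 */
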
/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq:	interrupt to control
 * @on:		enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 *
 * Note: irq enable/disable state is completely orthogonal
 * to the enable/disable state of irq wake. An irq can be
 * disabled with disable_irq() and still wake the system as
 * long as the irq has wake enabled. If this does not hold,
 * then the underlying irq chip and the related driver need
 * to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);

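/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * usually flips wake mode in its suspend/resume callbacks, keyed off
 * whether the device is configured as a wakeup source. foo_* names are
 * made up; enable_irq_wake()/disable_irq_wake() are the standard
 * wrappers around irq_set_irq_wake():
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
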
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, TWA_NONE);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

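/*
 * Usage sketch (illustrative only, not part of this file): a primary
 * handler that defers work without returning IRQ_WAKE_THREAD can kick
 * the thread explicitly, e.g. from a timer. foo_* is made-up driver
 * code; dev_id must match what was passed at request time:
 *
 *	irq_wake_thread(foo->irq, foo);
 */
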
static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only IRQs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}

static int irq_nmi_setup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}

static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_set_fifo(t);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	new->thread = get_task_struct(t);
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback.
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 * Interrupt lines used for NMIs cannot be shared.
		 */
		unsigned int oldtype;

		if (desc->istate & IRQS_NMI) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
			       new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
		       new->name, irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (!(new->flags & IRQF_NO_AUTOEN) &&
		    irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown. Deactivate after synchronize_hardirq() */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/*
	 * Make sure it's not being used on another CPU and if the chip
	 * supports it also make sure that there is no (not yet serviced)
	 * interrupt in flight at the hardware level.
	 */
	__synchronize_hardirq(desc, true);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		/*
		 * There is no interrupt on the fly anymore. Deactivate it
		 * completely.
		 */
		raw_spin_lock_irqsave(&desc->lock, flags);
		irq_domain_deactivate_irq(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 *
 * Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);

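/*
 * Usage sketch (illustrative only, not part of this file): teardown
 * order for a shared interrupt; quiesce the device before freeing.
 * foo_* is made-up driver code:
 *
 *	foo_hw_mask_interrupts(foo);	// device no longer raises the line
 *	free_irq(foo->irq, foo);	// same dev_id as at request time
 */
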
/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown_and_deactivate(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq:	Interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts
 *		If NULL and thread_fn != NULL the default
 *		primary handler is installed
 * @thread_fn:	Function called from the irq handler thread
 *		If NULL, no irq thread is created
 * @irqflags:	Interrupt type flags
 * @devname:	An ascii name for the claiming device
 * @dev_id:	A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        if (irq == IRQ_NOTCONNECTED)
                return -ENOTCONN;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         *
         * Also shared interrupts do not go well with disabling auto enable.
         * The sharing interrupt might request it while it's still disabled
         * and then wait for interrupts forever.
         *
         * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
         * it cannot be set along with IRQF_NO_SUSPEND.
         */
        if (((irqflags & IRQF_SHARED) && !dev_id) ||
            ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
            (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
            ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (!irq_settings_can_request(desc) ||
            WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        retval = irq_chip_pm_get(&desc->irq_data);
        if (retval < 0) {
                kfree(action);
                return retval;
        }

        retval = __setup_irq(irq, desc, action);
        if (retval) {
                irq_chip_pm_put(&desc->irq_data);
                kfree(action->secondary);
                kfree(action);
        }

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);
                handler(irq, dev_id);
                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
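
/*
 * Usage sketch (editorial illustration, not part of this file): a driver
 * requesting a split primary/threaded handler on a shared line. The
 * struct my_dev and the my_dev_*() helpers are hypothetical; only
 * request_threaded_irq() and the IRQ_* return values are real interfaces.
 *
 *      static irqreturn_t my_primary(int irq, void *dev_id)
 *      {
 *              struct my_dev *dev = dev_id;
 *
 *              if (!my_dev_irq_pending(dev))
 *                      return IRQ_NONE;        // shared line, not our device
 *              my_dev_mask_irq(dev);           // quiesce the device
 *              return IRQ_WAKE_THREAD;         // run my_thread_fn
 *      }
 *
 *      static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *      {
 *              struct my_dev *dev = dev_id;
 *
 *              my_dev_process(dev);            // may sleep here
 *              my_dev_unmask_irq(dev);
 *              return IRQ_HANDLED;
 *      }
 *
 *      err = request_threaded_irq(dev->irq, my_primary, my_thread_fn,
 *                                 IRQF_SHARED, "my_dev", dev);
 */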
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc;
        int ret;

        if (irq == IRQ_NOTCONNECTED)
                return -ENOTCONN;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (irq_settings_is_nested_thread(desc)) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
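
/*
 * Usage sketch (editorial illustration): the positive return value tells
 * the caller how the handler will run. dev and my_handler are hypothetical.
 *
 *      int ret = request_any_context_irq(dev->irq, my_handler, 0,
 *                                        "my_dev", dev);
 *      if (ret < 0)
 *              return ret;                     // request failed
 *      if (ret == IRQC_IS_NESTED)
 *              pr_info("my_dev: handler runs in a thread\n");
 *      else                                    // IRQC_IS_HARDIRQ
 *              pr_info("my_dev: handler runs in hard irq context\n");
 */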
/**
 * request_nmi - allocate an interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It sets up the IRQ line
 * to be handled as an NMI.
 *
 * An interrupt line delivering NMIs cannot be shared and IRQ handling
 * cannot be threaded.
 *
 * Interrupt lines requested for NMI delivery must produce per-CPU
 * interrupts and have auto enabling disabled.
 *
 * @dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If the interrupt line cannot be used to deliver NMIs, this function
 * will fail and return a negative value.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
                unsigned long irqflags, const char *name, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        unsigned long flags;
        int retval;

        if (irq == IRQ_NOTCONNECTED)
                return -ENOTCONN;

        /* NMI cannot be shared, used for polling */
        if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
                return -EINVAL;

        if (!(irqflags & IRQF_PERCPU))
                return -EINVAL;

        if (!handler)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc || (irq_settings_can_autoenable(desc) &&
            !(irqflags & IRQF_NO_AUTOEN)) ||
            !irq_settings_can_request(desc) ||
            WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
            !irq_supports_nmi(desc))
                return -EINVAL;

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
        action->name = name;
        action->dev_id = dev_id;

        retval = irq_chip_pm_get(&desc->irq_data);
        if (retval < 0)
                goto err_out;

        retval = __setup_irq(irq, desc, action);
        if (retval)
                goto err_irq_setup;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /* Setup NMI state */
        desc->istate |= IRQS_NMI;
        retval = irq_nmi_setup(desc);
        if (retval) {
                __cleanup_nmi(irq, desc);
                raw_spin_unlock_irqrestore(&desc->lock, flags);
                return -EINVAL;
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;

err_irq_setup:
        irq_chip_pm_put(&desc->irq_data);
err_out:
        kfree(action);
        return retval;
}
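
/*
 * Usage sketch (editorial illustration): an NMI request must target a
 * per-CPU capable line and must not auto-enable; my_nmi_handler and my_wd
 * are hypothetical names.
 *
 *      err = request_nmi(irq, my_nmi_handler,
 *                        IRQF_PERCPU | IRQF_NO_AUTOEN,
 *                        "my_watchdog", my_wd);
 *      if (!err)
 *              enable_nmi(irq);        // arm the NMI when ready
 */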
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

        if (!desc)
                return;

        /*
         * If the trigger type is not specified by the caller, then
         * use the default for this interrupt.
         */
        type &= IRQ_TYPE_SENSE_MASK;
        if (type == IRQ_TYPE_NONE)
                type = irqd_get_trigger_type(&desc->irq_data);

        if (type != IRQ_TYPE_NONE) {
                int ret;

                ret = __irq_set_trigger(desc, type);
                if (ret) {
                        WARN(1, "failed to set type for IRQ%d\n", irq);
                        goto out;
                }
        }

        irq_percpu_enable(desc, cpu);
out:
        irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
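
/*
 * Usage sketch (editorial illustration): enable_percpu_irq() only acts on
 * the calling CPU, so a driver that wants the line enabled everywhere
 * typically runs it on each CPU, e.g. via on_each_cpu(). my_irq is a
 * hypothetical irq number stored by the requesting driver.
 *
 *      static void my_enable(void *info)
 *      {
 *              enable_percpu_irq(my_irq, IRQ_TYPE_NONE);
 *      }
 *
 *      on_each_cpu(my_enable, NULL, 1);
 */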
void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
        enable_percpu_irq(irq, type);
}
/**
 * irq_percpu_is_enabled - Check whether the per-CPU irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
 * state of a per-CPU interrupt on the current CPU.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
        unsigned int cpu = smp_processor_id();
        struct irq_desc *desc;
        unsigned long flags;
        bool is_enabled;

        desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
        if (!desc)
                return false;

        is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
        irq_put_desc_unlock(desc, flags);
        return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
void disable_percpu_irq(unsigned int irq)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

        if (!desc)
                return;

        irq_percpu_disable(desc, cpu);
        irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);
void disable_percpu_nmi(unsigned int irq)
{
        disable_percpu_irq(irq);
}
/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        action = desc->action;
        if (!action || action->percpu_dev_id != dev_id) {
                WARN(1, "Trying to free already-free IRQ %d\n", irq);
                goto bad;
        }

        if (!cpumask_empty(desc->percpu_enabled)) {
                WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
                     irq, cpumask_first(desc->percpu_enabled));
                goto bad;
        }

        /* Found it - now remove it from the list of entries: */
        desc->action = NULL;
        desc->istate &= ~IRQS_NMI;

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        irq_chip_pm_put(&desc->irq_data);
        module_put(desc->owner);
        return action;

bad:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return NULL;
}
/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc && irq_settings_is_per_cpu_devid(desc))
                __free_percpu_irq(irq, act->percpu_dev_id);
}
/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but the
 * interrupt line is not disabled. This must be done on each CPU before
 * calling this function. The function does not return until any executing
 * interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irq_settings_is_per_cpu_devid(desc))
                return;

        chip_bus_lock(desc);
        kfree(__free_percpu_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);
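
/*
 * Usage sketch (editorial illustration): mirror of the enable pattern above;
 * the line has to be disabled on every CPU before it is freed. my_irq and
 * my_pcpu_data are hypothetical.
 *
 *      static void my_disable(void *info)
 *      {
 *              disable_percpu_irq(my_irq);
 *      }
 *
 *      on_each_cpu(my_disable, NULL, 1);
 *      free_percpu_irq(my_irq, &my_pcpu_data);
 */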
void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irq_settings_is_per_cpu_devid(desc))
                return;

        if (WARN_ON(!(desc->istate & IRQS_NMI)))
                return;

        kfree(__free_percpu_irq(irq, dev_id));
}
/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically set up per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int retval;

        if (!desc || !irq_settings_is_per_cpu_devid(desc))
                return -EINVAL;

        retval = irq_chip_pm_get(&desc->irq_data);
        if (retval < 0)
                return retval;

        retval = __setup_irq(irq, desc, act);
        if (retval)
                irq_chip_pm_put(&desc->irq_data);
        return retval;
}
/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @flags: Interrupt type flags (IRQF_TIMER only)
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * @dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
                         unsigned long flags, const char *devname,
                         void __percpu *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        if (!dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc || !irq_settings_can_request(desc) ||
            !irq_settings_is_per_cpu_devid(desc))
                return -EINVAL;

        if (flags && flags != IRQF_TIMER)
                return -EINVAL;

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
        action->name = devname;
        action->percpu_dev_id = dev_id;

        retval = irq_chip_pm_get(&desc->irq_data);
        if (retval < 0) {
                kfree(action);
                return retval;
        }

        retval = __setup_irq(irq, desc, action);
        if (retval) {
                irq_chip_pm_put(&desc->irq_data);
                kfree(action);
        }
        return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
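
/*
 * Usage sketch (editorial illustration): the request_percpu_irq() wrapper
 * around this function passes flags == 0. The per-cpu cookie is what the
 * handler receives on each CPU. my_data, my_pcpu_data and my_handler are
 * hypothetical.
 *
 *      static DEFINE_PER_CPU(struct my_data, my_pcpu_data);
 *
 *      err = request_percpu_irq(irq, my_handler, "my_timer", &my_pcpu_data);
 *      if (!err)
 *              enable_percpu_irq(irq, IRQ_TYPE_NONE);  // current CPU only
 */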
/**
 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @name: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
 * have to be set up on each CPU by calling prepare_percpu_nmi() before
 * being enabled on the same CPU by using enable_percpu_nmi().
 *
 * @dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 *
 * Interrupt lines requested for NMI delivery must have auto enabling
 * disabled.
 *
 * If the interrupt line cannot be used to deliver NMIs, this function
 * will fail and return a negative value.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
                       const char *name, void __percpu *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        unsigned long flags;
        int retval;

        if (!handler)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc || !irq_settings_can_request(desc) ||
            !irq_settings_is_per_cpu_devid(desc) ||
            irq_settings_can_autoenable(desc) ||
            !irq_supports_nmi(desc))
                return -EINVAL;

        /* The line cannot already be NMI */
        if (desc->istate & IRQS_NMI)
                return -EINVAL;

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
                | IRQF_NOBALANCING;
        action->name = name;
        action->percpu_dev_id = dev_id;

        retval = irq_chip_pm_get(&desc->irq_data);
        if (retval < 0)
                goto err_out;

        retval = __setup_irq(irq, desc, action);
        if (retval)
                goto err_irq_setup;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->istate |= IRQS_NMI;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;

err_irq_setup:
        irq_chip_pm_put(&desc->irq_data);
err_out:
        kfree(action);
        return retval;
}
/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMI on the current CPU,
 * before that interrupt line gets enabled with enable_percpu_nmi().
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 *
 * If the interrupt line cannot be used to deliver NMIs, this function
 * will fail and return a negative value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc;
        int ret = 0;

        WARN_ON(preemptible());

        desc = irq_get_desc_lock(irq, &flags,
                                 IRQ_GET_DESC_CHECK_PERCPU);
        if (!desc)
                return -EINVAL;

        if (WARN(!(desc->istate & IRQS_NMI),
                 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
                 irq)) {
                ret = -EINVAL;
                goto out;
        }

        ret = irq_nmi_setup(desc);
        if (ret)
                pr_err("Failed to setup NMI delivery: irq %u\n", irq);

out:
        irq_put_desc_unlock(desc, flags);
        return ret;
}
/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *       removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * IRQ line should not be enabled for the current CPU.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc;

        WARN_ON(preemptible());

        desc = irq_get_desc_lock(irq, &flags,
                                 IRQ_GET_DESC_CHECK_PERCPU);
        if (!desc)
                return;

        if (WARN_ON(!(desc->istate & IRQS_NMI)))
                goto out;

        irq_nmi_teardown(desc);
out:
        irq_put_desc_unlock(desc, flags);
}
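
/*
 * Usage sketch (editorial illustration) of the whole per-CPU NMI life cycle
 * described above. my_nmi_handler and my_pcpu_data are hypothetical; the
 * prepare/enable and disable/teardown steps are CPU-local and must run on
 * each CPU with preemption disabled.
 *
 *      err = request_percpu_nmi(irq, my_nmi_handler, "my_pmu", &my_pcpu_data);
 *
 *      // on each CPU, non-preemptible context:
 *      err = prepare_percpu_nmi(irq);
 *      if (!err)
 *              enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *
 *      // teardown, again on each CPU:
 *      disable_percpu_nmi(irq);
 *      teardown_percpu_nmi(irq);
 *
 *      // finally, from task context:
 *      free_percpu_nmi(irq, &my_pcpu_data);
 */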
int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
                            bool *state)
{
        struct irq_chip *chip;
        int err = -EINVAL;

        do {
                chip = irq_data_get_irq_chip(data);
                if (WARN_ON_ONCE(!chip))
                        return -ENODEV;
                if (chip->irq_get_irqchip_state)
                        break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
                data = data->parent_data;
#else
                data = NULL;
#endif
        } while (data);

        if (data)
                err = chip->irq_get_irqchip_state(data, which, state);
        return err;
}
/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
                          bool *state)
{
        struct irq_desc *desc;
        struct irq_data *data;
        unsigned long flags;
        int err;

        desc = irq_get_desc_buslock(irq, &flags, 0);
        if (!desc)
                return -EINVAL;

        data = irq_desc_get_irq_data(desc);
        err = __irq_get_irqchip_state(data, which, state);
        irq_put_desc_busunlock(desc, flags);
        return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
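
/*
 * Usage sketch (editorial illustration): polling whether a forwarded
 * interrupt is still pending at the chip. host_irq is hypothetical.
 *
 *      bool pending = false;
 *      int err = irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
 *                                      &pending);
 *      if (!err && pending)
 *              pr_debug("irq %u still pending at the irqchip\n", host_irq);
 */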
/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
                          bool val)
{
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;
        unsigned long flags;
        int err = -EINVAL;

        desc = irq_get_desc_buslock(irq, &flags, 0);
        if (!desc)
                return -EINVAL;

        data = irq_desc_get_irq_data(desc);

        do {
                chip = irq_data_get_irq_chip(data);
                if (WARN_ON_ONCE(!chip)) {
                        err = -ENODEV;
                        goto out_unlock;
                }
                if (chip->irq_set_irqchip_state)
                        break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
                data = data->parent_data;
#else
                data = NULL;
#endif
        } while (data);

        if (data)
                err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
        irq_put_desc_busunlock(desc, flags);
        return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
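
/*
 * Usage sketch (editorial illustration): restoring saved state into the
 * chip, e.g. when re-injecting a forwarded interrupt's pending bit.
 * host_irq and saved_pending are hypothetical.
 *
 *      err = irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
 *                                  saved_pending);
 */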
/**
 * irq_has_action - Check whether an interrupt is requested
 * @irq: The linux irq number
 *
 * Returns: A snapshot of the current state
 */
bool irq_has_action(unsigned int irq)
{
        bool res;

        rcu_read_lock();
        res = irq_desc_has_action(irq_to_desc(irq));
        rcu_read_unlock();
        return res;
}
EXPORT_SYMBOL_GPL(irq_has_action);
/**
 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
 * @irq: The linux irq number
 * @bitmask: The bitmask to evaluate
 *
 * Returns: True if one of the bits in @bitmask is set
 */
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
{
        struct irq_desc *desc;
        bool res = false;

        rcu_read_lock();
        desc = irq_to_desc(irq);
        if (desc)
                res = !!(desc->status_use_accessors & bitmask);
        rcu_read_unlock();
        return res;
}
EXPORT_SYMBOL_GPL(irq_check_status_bit);