1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006 Thomas Gleixner
6 * This file contains driver APIs to the irq subsystem.
9 #define pr_fmt(fmt) "genirq: " fmt
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <uapi/linux/sched/types.h>
22 #include <linux/task_work.h>
24 #include "internals.h"
26 #ifdef CONFIG_IRQ_FORCED_THREADING
27 __read_mostly bool force_irqthreads;
28 EXPORT_SYMBOL_GPL(force_irqthreads);
30 static int __init setup_forced_irqthreads(char *arg)
32 force_irqthreads = true;
35 early_param("threadirqs", setup_forced_irqthreads);
38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
40 struct irq_data *irqd = irq_desc_get_irq_data(desc);
47 * Wait until we're out of the critical section. This might
48 * give the wrong answer due to the lack of memory barriers.
50 while (irqd_irq_inprogress(&desc->irq_data))
53 /* Ok, that indicated we're done: double-check carefully. */
54 raw_spin_lock_irqsave(&desc->lock, flags);
55 inprogress = irqd_irq_inprogress(&desc->irq_data);
58 * If requested and supported, check at the chip whether it
59 * is in flight at the hardware level, i.e. already pending
60 * in a CPU and waiting for service and acknowledge.
62 if (!inprogress && sync_chip) {
64 * Ignore the return code. inprogress is only updated
65 * when the chip supports it.
67 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
70 raw_spin_unlock_irqrestore(&desc->lock, flags);
72 /* Oops, that failed? */
77 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
78 * @irq: interrupt number to wait for
80 * This function waits for any pending hard IRQ handlers for this
81 * interrupt to complete before returning. If you use this
82 * function while holding a resource the IRQ handler may need you
83 will deadlock. It does not take associated threaded handlers into account.
86 * Do not use this for shutdown scenarios where you must be sure
87 * that all parts (hardirq and threaded handler) have completed.
89 * Returns: false if a threaded handler is active.
91 * This function may be called - with care - from IRQ context.
93 * It does not check whether there is an interrupt in flight at the
94 * hardware level, but not serviced yet, as this might deadlock when
95 called with interrupts disabled and the target CPU of the interrupt is the current CPU.
98 bool synchronize_hardirq(unsigned int irq)
100 struct irq_desc *desc = irq_to_desc(irq);
103 __synchronize_hardirq(desc, false);
104 return !atomic_read(&desc->threads_active);
109 EXPORT_SYMBOL(synchronize_hardirq);
112 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
113 * @irq: interrupt number to wait for
115 * This function waits for any pending IRQ handlers for this interrupt
116 * to complete before returning. If you use this function while
117 * holding a resource the IRQ handler may need you will deadlock.
119 * Can only be called from preemptible code as it might sleep when
120 * an interrupt thread is associated to @irq.
122 * It optionally makes sure (when the irq chip supports that method)
123 that the interrupt is not pending in any CPU and waiting for service.
126 void synchronize_irq(unsigned int irq)
128 struct irq_desc *desc = irq_to_desc(irq);
131 __synchronize_hardirq(desc, true);
133 * We made sure that no hardirq handler is
134 * running. Now verify that no threaded handlers are active.
137 wait_event(desc->wait_for_threads,
138 !atomic_read(&desc->threads_active));
141 EXPORT_SYMBOL(synchronize_irq);
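/*
 * Illustrative sketch (not part of the original file): a typical driver
 * quiesce path pairs disable_irq_nosync() with synchronize_irq() before
 * releasing resources its handlers touch. "my_dev" and its fields are
 * hypothetical names.
 *
 *	static void my_dev_quiesce(struct my_dev *dev)
 *	{
 *		disable_irq_nosync(dev->irq);	// stop new invocations
 *		synchronize_irq(dev->irq);	// wait out hardirq + thread
 *		// now safe to free resources the handlers use
 *	}
 */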
144 cpumask_var_t irq_default_affinity;
146 static bool __irq_can_set_affinity(struct irq_desc *desc)
148 if (!desc || !irqd_can_balance(&desc->irq_data) ||
149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
155 * irq_can_set_affinity - Check if the affinity of a given irq can be set
156 * @irq: Interrupt to check
159 int irq_can_set_affinity(unsigned int irq)
161 return __irq_can_set_affinity(irq_to_desc(irq));
165 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
166 * @irq: Interrupt to check
168 * Like irq_can_set_affinity() above, but additionally checks for the
169 * AFFINITY_MANAGED flag.
171 bool irq_can_set_affinity_usr(unsigned int irq)
173 struct irq_desc *desc = irq_to_desc(irq);
175 return __irq_can_set_affinity(desc) &&
176 !irqd_affinity_is_managed(&desc->irq_data);
180 * irq_set_thread_affinity - Notify irq threads to adjust affinity
181 * @desc: irq descriptor which has affinity changed
183 * We just set IRQTF_AFFINITY and delegate the affinity setting
184 * to the interrupt thread itself. We can not call
185 * set_cpus_allowed_ptr() here as we hold desc->lock and this
186 * code can be called from hard interrupt context.
188 void irq_set_thread_affinity(struct irq_desc *desc)
190 struct irqaction *action;
192 for_each_action_of_desc(desc, action)
194 set_bit(IRQTF_AFFINITY, &action->thread_flags);
197 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
198 static void irq_validate_effective_affinity(struct irq_data *data)
200 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
201 struct irq_chip *chip = irq_data_get_irq_chip(data);
203 if (!cpumask_empty(m))
205 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
206 chip->name, data->irq);
209 static inline void irq_init_effective_affinity(struct irq_data *data,
210 const struct cpumask *mask)
212 cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
215 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
216 static inline void irq_init_effective_affinity(struct irq_data *data,
217 const struct cpumask *mask) { }
220 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
223 struct irq_desc *desc = irq_data_to_desc(data);
224 struct irq_chip *chip = irq_data_get_irq_chip(data);
227 if (!chip || !chip->irq_set_affinity)
230 ret = chip->irq_set_affinity(data, mask, force);
232 case IRQ_SET_MASK_OK:
233 case IRQ_SET_MASK_OK_DONE:
234 cpumask_copy(desc->irq_common_data.affinity, mask);
235 case IRQ_SET_MASK_OK_NOCOPY:
236 irq_validate_effective_affinity(data);
237 irq_set_thread_affinity(desc);
244 #ifdef CONFIG_GENERIC_PENDING_IRQ
245 static inline int irq_set_affinity_pending(struct irq_data *data,
246 const struct cpumask *dest)
248 struct irq_desc *desc = irq_data_to_desc(data);
250 irqd_set_move_pending(data);
251 irq_copy_pending(desc, dest);
255 static inline int irq_set_affinity_pending(struct irq_data *data,
256 const struct cpumask *dest)
262 static int irq_try_set_affinity(struct irq_data *data,
263 const struct cpumask *dest, bool force)
265 int ret = irq_do_set_affinity(data, dest, force);
268 * If the underlying vector management is busy and the
269 * architecture supports the generic pending mechanism, then utilize
270 * it to avoid returning an error to user space.
272 if (ret == -EBUSY && !force)
273 ret = irq_set_affinity_pending(data, dest);
277 static bool irq_set_affinity_deactivated(struct irq_data *data,
278 const struct cpumask *mask, bool force)
280 struct irq_desc *desc = irq_data_to_desc(data);
283 * Handle irq chips which can handle affinity correctly only in the activated state.
286 * If the interrupt is not yet activated, just store the affinity
287 * mask and do not call the chip driver at all. On activation the
288 * driver has to make sure anyway that the interrupt is in a
289 * usable state so startup works.
291 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
292 irqd_is_activated(data) || !irqd_affinity_on_activate(data))
295 cpumask_copy(desc->irq_common_data.affinity, mask);
296 irq_init_effective_affinity(data, mask);
297 irqd_set(data, IRQD_AFFINITY_SET);
301 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
304 struct irq_chip *chip = irq_data_get_irq_chip(data);
305 struct irq_desc *desc = irq_data_to_desc(data);
308 if (!chip || !chip->irq_set_affinity)
311 if (irq_set_affinity_deactivated(data, mask, force))
314 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
315 ret = irq_try_set_affinity(data, mask, force);
317 irqd_set_move_pending(data);
318 irq_copy_pending(desc, mask);
321 if (desc->affinity_notify) {
322 kref_get(&desc->affinity_notify->kref);
323 if (!schedule_work(&desc->affinity_notify->work)) {
324 /* Work was already scheduled, drop our extra ref */
325 kref_put(&desc->affinity_notify->kref,
326 desc->affinity_notify->release);
329 irqd_set(data, IRQD_AFFINITY_SET);
334 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
336 struct irq_desc *desc = irq_to_desc(irq);
343 raw_spin_lock_irqsave(&desc->lock, flags);
344 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
345 raw_spin_unlock_irqrestore(&desc->lock, flags);
349 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
352 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
356 desc->affinity_hint = m;
357 irq_put_desc_unlock(desc, flags);
358 /* set the initial affinity to prevent every interrupt being on CPU0 */
360 __irq_set_affinity(irq, m, false);
363 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
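/*
 * Illustrative sketch (hypothetical multiqueue driver, not part of the
 * original file): spread per-queue vectors and publish the hint so user
 * space irqbalance can honour it. "nvec" and "vec_irq()" are assumed
 * helper names.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vec_irq(dev, i),
 *				      cpumask_of(i % num_online_cpus()));
 *
 *	// and on teardown, before free_irq():
 *	irq_set_affinity_hint(vec_irq(dev, i), NULL);
 */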
365 static void irq_affinity_notify(struct work_struct *work)
367 struct irq_affinity_notify *notify =
368 container_of(work, struct irq_affinity_notify, work);
369 struct irq_desc *desc = irq_to_desc(notify->irq);
370 cpumask_var_t cpumask;
373 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
376 raw_spin_lock_irqsave(&desc->lock, flags);
377 if (irq_move_pending(&desc->irq_data))
378 irq_get_pending(cpumask, desc);
380 cpumask_copy(cpumask, desc->irq_common_data.affinity);
381 raw_spin_unlock_irqrestore(&desc->lock, flags);
383 notify->notify(notify, cpumask);
385 free_cpumask_var(cpumask);
387 kref_put(&notify->kref, notify->release);
391 * irq_set_affinity_notifier - control notification of IRQ affinity changes
392 * @irq: Interrupt for which to enable/disable notification
393 * @notify: Context for notification, or %NULL to disable
394 * notification. Function pointers must be initialised;
395 * the other fields will be initialised by this function.
397 * Must be called in process context. Notification may only be enabled
398 * after the IRQ is allocated and must be disabled before the IRQ is
399 * freed using free_irq().
402 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
404 struct irq_desc *desc = irq_to_desc(irq);
405 struct irq_affinity_notify *old_notify;
408 /* The release function is promised process context */
414 /* Complete initialisation of *notify */
417 kref_init(&notify->kref);
418 INIT_WORK(&notify->work, irq_affinity_notify);
421 raw_spin_lock_irqsave(&desc->lock, flags);
422 old_notify = desc->affinity_notify;
423 desc->affinity_notify = notify;
424 raw_spin_unlock_irqrestore(&desc->lock, flags);
427 if (cancel_work_sync(&old_notify->work)) {
428 /* Pending work had a ref, put that one too */
429 kref_put(&old_notify->kref, old_notify->release);
431 kref_put(&old_notify->kref, old_notify->release);
436 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
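/*
 * Illustrative sketch (hypothetical names, not part of the original
 * file): a driver that rebuilds per-CPU state when the affinity
 * changes. Only .notify and .release must be filled in by the caller;
 * kref and work are initialised above.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		// runs in process context via schedule_work()
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *n =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		kfree(n);
 *	}
 *
 *	notify->notify = my_notify;
 *	notify->release = my_release;
 *	irq_set_affinity_notifier(irq, notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// before free_irq()
 */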
438 #ifndef CONFIG_AUTO_IRQ_AFFINITY
440 * Generic version of the affinity autoselector.
442 int irq_setup_affinity(struct irq_desc *desc)
444 struct cpumask *set = irq_default_affinity;
445 int ret, node = irq_desc_get_node(desc);
446 static DEFINE_RAW_SPINLOCK(mask_lock);
447 static struct cpumask mask;
449 /* Excludes PER_CPU and NO_BALANCE interrupts */
450 if (!__irq_can_set_affinity(desc))
453 raw_spin_lock(&mask_lock);
455 * Preserve the managed affinity setting and a userspace affinity
456 * setup, but make sure that one of the targets is online.
458 if (irqd_affinity_is_managed(&desc->irq_data) ||
459 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
460 if (cpumask_intersects(desc->irq_common_data.affinity,
462 set = desc->irq_common_data.affinity;
464 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
467 cpumask_and(&mask, cpu_online_mask, set);
468 if (cpumask_empty(&mask))
469 cpumask_copy(&mask, cpu_online_mask);
471 if (node != NUMA_NO_NODE) {
472 const struct cpumask *nodemask = cpumask_of_node(node);
474 /* make sure at least one of the cpus in nodemask is online */
475 if (cpumask_intersects(&mask, nodemask))
476 cpumask_and(&mask, &mask, nodemask);
478 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
479 raw_spin_unlock(&mask_lock);
483 /* Wrapper for ALPHA specific affinity selector magic */
484 int irq_setup_affinity(struct irq_desc *desc)
486 return irq_select_affinity(irq_desc_get_irq(desc));
488 #endif /* CONFIG_AUTO_IRQ_AFFINITY */
489 #endif /* CONFIG_SMP */
493 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
494 * @irq: interrupt number to set affinity
495 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
496 * specific data for percpu_devid interrupts
498 * This function uses the vCPU specific data to set the vCPU
499 * affinity for an irq. The vCPU specific data is passed from
500 * outside, such as KVM. One example code path is as below:
501 * KVM -> IOMMU -> irq_set_vcpu_affinity().
503 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
506 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
507 struct irq_data *data;
508 struct irq_chip *chip;
514 data = irq_desc_get_irq_data(desc);
516 chip = irq_data_get_irq_chip(data);
517 if (chip && chip->irq_set_vcpu_affinity)
519 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
520 data = data->parent_data;
527 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
528 irq_put_desc_unlock(desc, flags);
532 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
534 void __disable_irq(struct irq_desc *desc)
540 static int __disable_irq_nosync(unsigned int irq)
543 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
548 irq_put_desc_busunlock(desc, flags);
553 * disable_irq_nosync - disable an irq without waiting
554 * @irq: Interrupt to disable
556 * Disable the selected interrupt line. Disables and Enables are nested.
558 * Unlike disable_irq(), this function does not ensure existing
559 * instances of the IRQ handler have completed before returning.
561 * This function may be called from IRQ context.
563 void disable_irq_nosync(unsigned int irq)
565 __disable_irq_nosync(irq);
567 EXPORT_SYMBOL(disable_irq_nosync);
570 * disable_irq - disable an irq and wait for completion
571 * @irq: Interrupt to disable
573 * Disable the selected interrupt line. Enables and Disables are nested.
575 * This function waits for any pending IRQ handlers for this interrupt
576 * to complete before returning. If you use this function while
577 * holding a resource the IRQ handler may need you will deadlock.
579 * This function may be called - with care - from IRQ context.
581 void disable_irq(unsigned int irq)
583 if (!__disable_irq_nosync(irq))
584 synchronize_irq(irq);
586 EXPORT_SYMBOL(disable_irq);
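/*
 * Illustrative sketch of the nesting rule stated above: each
 * disable_irq() must be balanced by exactly one enable_irq().
 * "dev" is a hypothetical name.
 *
 *	disable_irq(dev->irq);	// depth 0 -> 1, line masked
 *	disable_irq(dev->irq);	// depth 1 -> 2, still masked
 *	enable_irq(dev->irq);	// depth 2 -> 1, still masked
 *	enable_irq(dev->irq);	// depth 1 -> 0, line unmasked again
 */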
589 * disable_hardirq - disables an irq and waits for hardirq completion
590 * @irq: Interrupt to disable
592 * Disable the selected interrupt line. Enables and Disables are nested.
594 * This function waits for any pending hard IRQ handlers for this
595 * interrupt to complete before returning. If you use this function while
596 * holding a resource the hard IRQ handler may need you will deadlock.
598 * When used to optimistically disable an interrupt from atomic context
599 * the return value must be checked.
601 * Returns: false if a threaded handler is active.
603 * This function may be called - with care - from IRQ context.
605 bool disable_hardirq(unsigned int irq)
607 if (!__disable_irq_nosync(irq))
608 return synchronize_hardirq(irq);
612 EXPORT_SYMBOL_GPL(disable_hardirq);
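/*
 * Illustrative sketch of the optimistic atomic-context use described
 * above (a netpoll-style poller); the device names are hypothetical:
 *
 *	if (disable_hardirq(dev->irq)) {
 *		// neither hardirq nor threaded handler is running,
 *		// so it is safe to poll the device directly
 *		my_dev_poll(dev);
 *	}
 *	enable_irq(dev->irq);
 */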
614 void __enable_irq(struct irq_desc *desc)
616 switch (desc->depth) {
619 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
620 irq_desc_get_irq(desc));
623 if (desc->istate & IRQS_SUSPENDED)
625 /* Prevent probing on this irq: */
626 irq_settings_set_noprobe(desc);
628 * Call irq_startup() not irq_enable() here because the
629 * interrupt might be marked NOAUTOEN. So irq_startup()
630 * needs to be invoked when it gets enabled the first
631 * time. If it was already started up, then irq_startup()
632 * will invoke irq_enable() under the hood.
634 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
643 * enable_irq - enable handling of an irq
644 * @irq: Interrupt to enable
646 * Undoes the effect of one call to disable_irq(). If this
647 * matches the last disable, processing of interrupts on this
648 * IRQ line is re-enabled.
650 * This function may be called from IRQ context only when
651 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL!
653 void enable_irq(unsigned int irq)
656 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
660 if (WARN(!desc->irq_data.chip,
661 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
666 irq_put_desc_busunlock(desc, flags);
668 EXPORT_SYMBOL(enable_irq);
670 static int set_irq_wake_real(unsigned int irq, unsigned int on)
672 struct irq_desc *desc = irq_to_desc(irq);
675 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
678 if (desc->irq_data.chip->irq_set_wake)
679 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
685 * irq_set_irq_wake - control irq power management wakeup
686 * @irq: interrupt to control
687 * @on: enable/disable power management wakeup
689 * Enable/disable power management wakeup mode, which is
690 * disabled by default. Enables and disables must match,
691 * just as they match for non-wakeup mode support.
693 * Wakeup mode lets this IRQ wake the system from sleep
694 * states like "suspend to RAM".
696 int irq_set_irq_wake(unsigned int irq, unsigned int on)
699 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
705 /* wakeup-capable irqs can be shared between drivers that
706 * don't need to have the same sleep mode behaviors.
709 if (desc->wake_depth++ == 0) {
710 ret = set_irq_wake_real(irq, on);
712 desc->wake_depth = 0;
714 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
717 if (desc->wake_depth == 0) {
718 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
719 } else if (--desc->wake_depth == 0) {
720 ret = set_irq_wake_real(irq, on);
722 desc->wake_depth = 1;
724 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
727 irq_put_desc_busunlock(desc, flags);
730 EXPORT_SYMBOL(irq_set_irq_wake);
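/*
 * Illustrative sketch (hypothetical driver, not part of the original
 * file): arm the wake source on suspend and disarm it symmetrically on
 * resume, keeping the enable/disable counts balanced as required above.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(my_irq(d), 1);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(my_irq(d), 0);
 *		return 0;
 *	}
 */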
733 * Internal function that tells the architecture code whether a
734 * particular irq has been exclusively allocated or is available for driver use.
737 int can_request_irq(unsigned int irq, unsigned long irqflags)
740 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
746 if (irq_settings_can_request(desc)) {
748 irqflags & desc->action->flags & IRQF_SHARED)
751 irq_put_desc_unlock(desc, flags);
755 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
757 struct irq_chip *chip = desc->irq_data.chip;
760 if (!chip || !chip->irq_set_type) {
762 * IRQF_TRIGGER_* but the PIC does not support multiple flow-types?
765 pr_debug("No set_type function for IRQ %d (%s)\n",
766 irq_desc_get_irq(desc),
767 chip ? (chip->name ? : "unknown") : "unknown");
771 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
772 if (!irqd_irq_masked(&desc->irq_data))
774 if (!irqd_irq_disabled(&desc->irq_data))
778 /* Mask all flags except trigger mode */
779 flags &= IRQ_TYPE_SENSE_MASK;
780 ret = chip->irq_set_type(&desc->irq_data, flags);
783 case IRQ_SET_MASK_OK:
784 case IRQ_SET_MASK_OK_DONE:
785 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
786 irqd_set(&desc->irq_data, flags);
788 case IRQ_SET_MASK_OK_NOCOPY:
789 flags = irqd_get_trigger_type(&desc->irq_data);
790 irq_settings_set_trigger_mask(desc, flags);
791 irqd_clear(&desc->irq_data, IRQD_LEVEL);
792 irq_settings_clr_level(desc);
793 if (flags & IRQ_TYPE_LEVEL_MASK) {
794 irq_settings_set_level(desc);
795 irqd_set(&desc->irq_data, IRQD_LEVEL);
801 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
802 flags, irq_desc_get_irq(desc), chip->irq_set_type);
809 #ifdef CONFIG_HARDIRQS_SW_RESEND
810 int irq_set_parent(int irq, int parent_irq)
813 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
818 desc->parent_irq = parent_irq;
820 irq_put_desc_unlock(desc, flags);
823 EXPORT_SYMBOL_GPL(irq_set_parent);
827 * Default primary interrupt handler for threaded interrupts. Is
828 * assigned as primary handler when request_threaded_irq is called
829 * with handler == NULL. Useful for oneshot interrupts.
831 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
833 return IRQ_WAKE_THREAD;
837 * Primary handler for nested threaded interrupts. Should never be called.
840 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
842 WARN(1, "Primary handler called for nested irq %d\n", irq);
846 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
848 WARN(1, "Secondary action handler called for irq %d\n", irq);
852 static int irq_wait_for_interrupt(struct irqaction *action)
855 set_current_state(TASK_INTERRUPTIBLE);
857 if (kthread_should_stop()) {
858 /* may need to run one last time */
859 if (test_and_clear_bit(IRQTF_RUNTHREAD,
860 &action->thread_flags)) {
861 __set_current_state(TASK_RUNNING);
864 __set_current_state(TASK_RUNNING);
868 if (test_and_clear_bit(IRQTF_RUNTHREAD,
869 &action->thread_flags)) {
870 __set_current_state(TASK_RUNNING);
878 * Oneshot interrupts keep the irq line masked until the threaded
879 * handler has finished. Unmask if the interrupt has not been disabled and is marked MASKED.
882 static void irq_finalize_oneshot(struct irq_desc *desc,
883 struct irqaction *action)
885 if (!(desc->istate & IRQS_ONESHOT) ||
886 action->handler == irq_forced_secondary_handler)
890 raw_spin_lock_irq(&desc->lock);
893 * Implausible though it may be, we need to protect ourselves against
894 * the following scenario:
896 * The thread is faster done than the hard interrupt handler
897 * on the other CPU. If we unmask the irq line then the
898 * interrupt can come in again and masks the line, leaves due
899 * to IRQS_INPROGRESS and the irq line is masked forever.
901 * This also serializes the state of shared oneshot handlers
902 * versus "desc->threads_oneshot |= action->thread_mask;" in
903 * irq_wake_thread(). See the comment there which explains the
906 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
907 raw_spin_unlock_irq(&desc->lock);
908 chip_bus_sync_unlock(desc);
914 * Now check again, whether the thread should run. Otherwise
915 * we would clear the threads_oneshot bit of this thread which was just set.
918 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
921 desc->threads_oneshot &= ~action->thread_mask;
923 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
924 irqd_irq_masked(&desc->irq_data))
925 unmask_threaded_irq(desc);
928 raw_spin_unlock_irq(&desc->lock);
929 chip_bus_sync_unlock(desc);
934 * Check whether we need to change the affinity of the interrupt thread.
937 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
942 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
946 * In case we are out of memory we set IRQTF_AFFINITY again and
947 * try again next time
949 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
950 set_bit(IRQTF_AFFINITY, &action->thread_flags);
954 raw_spin_lock_irq(&desc->lock);
956 * This code is triggered unconditionally. Check the affinity
957 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
959 if (cpumask_available(desc->irq_common_data.affinity)) {
960 const struct cpumask *m;
962 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
963 cpumask_copy(mask, m);
967 raw_spin_unlock_irq(&desc->lock);
970 set_cpus_allowed_ptr(current, mask);
971 free_cpumask_var(mask);
975 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
979 * Interrupts which are not explicitly requested as threaded
980 * interrupts rely on the implicit bh/preempt disable of the hard irq
981 * context. So we need to disable bh here to avoid deadlocks and other side effects.
985 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
990 if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
992 ret = action->thread_fn(action->irq, action->dev_id);
993 if (ret == IRQ_HANDLED)
994 atomic_inc(&desc->threads_handled);
996 irq_finalize_oneshot(desc, action);
997 if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
1004 * Interrupts explicitly requested as threaded interrupts want to be
1005 * preemptible - many of them need to sleep and wait for slow buses to finish.
1008 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1009 struct irqaction *action)
1013 ret = action->thread_fn(action->irq, action->dev_id);
1014 if (ret == IRQ_HANDLED)
1015 atomic_inc(&desc->threads_handled);
1017 irq_finalize_oneshot(desc, action);
1021 static void wake_threads_waitq(struct irq_desc *desc)
1023 if (atomic_dec_and_test(&desc->threads_active))
1024 wake_up(&desc->wait_for_threads);
1027 static void irq_thread_dtor(struct callback_head *unused)
1029 struct task_struct *tsk = current;
1030 struct irq_desc *desc;
1031 struct irqaction *action;
1033 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1036 action = kthread_data(tsk);
1038 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1039 tsk->comm, tsk->pid, action->irq);
1042 desc = irq_to_desc(action->irq);
1044 * If IRQTF_RUNTHREAD is set, we need to decrement
1045 * desc->threads_active and wake possible waiters.
1047 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1048 wake_threads_waitq(desc);
1050 /* Prevent a stale desc->threads_oneshot */
1051 irq_finalize_oneshot(desc, action);
1054 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1056 struct irqaction *secondary = action->secondary;
1058 if (WARN_ON_ONCE(!secondary))
1061 raw_spin_lock_irq(&desc->lock);
1062 __irq_wake_thread(desc, secondary);
1063 raw_spin_unlock_irq(&desc->lock);
1067 * Interrupt handler thread
1069 static int irq_thread(void *data)
1071 struct callback_head on_exit_work;
1072 struct irqaction *action = data;
1073 struct irq_desc *desc = irq_to_desc(action->irq);
1074 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1075 struct irqaction *action);
1077 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1078 &action->thread_flags))
1079 handler_fn = irq_forced_thread_fn;
1081 handler_fn = irq_thread_fn;
1083 init_task_work(&on_exit_work, irq_thread_dtor);
1084 task_work_add(current, &on_exit_work, false);
1086 irq_thread_check_affinity(desc, action);
1088 while (!irq_wait_for_interrupt(action)) {
1089 irqreturn_t action_ret;
1091 irq_thread_check_affinity(desc, action);
1093 action_ret = handler_fn(desc, action);
1094 if (action_ret == IRQ_WAKE_THREAD)
1095 irq_wake_secondary(desc, action);
1097 wake_threads_waitq(desc);
1101 * This is the regular exit path. __free_irq() is stopping the
1102 * thread via kthread_stop() after calling
1103 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1104 * oneshot mask bit can be set.
1106 task_work_cancel(current, irq_thread_dtor);
1111 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1112 * @irq: Interrupt line
1113 * @dev_id: Device identity for which the thread should be woken
1116 void irq_wake_thread(unsigned int irq, void *dev_id)
1118 struct irq_desc *desc = irq_to_desc(irq);
1119 struct irqaction *action;
1120 unsigned long flags;
1122 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1125 raw_spin_lock_irqsave(&desc->lock, flags);
1126 for_each_action_of_desc(desc, action) {
1127 if (action->dev_id == dev_id) {
1129 __irq_wake_thread(desc, action);
1133 raw_spin_unlock_irqrestore(&desc->lock, flags);
1135 EXPORT_SYMBOL_GPL(irq_wake_thread);
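/*
 * Illustrative sketch (hypothetical names): a driver whose hard handler
 * cannot fire (e.g. a polled error path) can still kick its threaded
 * handler by dev_id:
 *
 *	if (my_dev_error_pending(dev))
 *		irq_wake_thread(dev->irq, dev);	// same dev_id as at request time
 */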
1137 static int irq_setup_forced_threading(struct irqaction *new)
1139 if (!force_irqthreads)
1141 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1145 * No further action required for interrupts which are requested as
1146 * threaded interrupts already
1148 if (new->handler == irq_default_primary_handler)
1151 new->flags |= IRQF_ONESHOT;
1154 * Handle the case where we have a real primary handler and a
1155 * thread handler. We force thread them as well by creating a secondary action.
1158 if (new->handler && new->thread_fn) {
1159 /* Allocate the secondary action */
1160 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1161 if (!new->secondary)
1163 new->secondary->handler = irq_forced_secondary_handler;
1164 new->secondary->thread_fn = new->thread_fn;
1165 new->secondary->dev_id = new->dev_id;
1166 new->secondary->irq = new->irq;
1167 new->secondary->name = new->name;
1169 /* Deal with the primary handler */
1170 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1171 new->thread_fn = new->handler;
1172 new->handler = irq_default_primary_handler;
1176 static int irq_request_resources(struct irq_desc *desc)
1178 struct irq_data *d = &desc->irq_data;
1179 struct irq_chip *c = d->chip;
1181 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1184 static void irq_release_resources(struct irq_desc *desc)
1186 struct irq_data *d = &desc->irq_data;
1187 struct irq_chip *c = d->chip;
1189 if (c->irq_release_resources)
1190 c->irq_release_resources(d);
1194 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1196 struct task_struct *t;
1197 struct sched_param param = {
1198 .sched_priority = MAX_USER_RT_PRIO/2,
1202 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1205 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1207 param.sched_priority -= 1;
1213 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1216 * We keep the reference to the task struct even if
1217 * the thread dies to avoid that the interrupt code
1218 * references an already freed task_struct.
1223 * Tell the thread to set its affinity. This is
1224 * important for shared interrupt handlers as we do
1225 * not invoke setup_affinity() for the secondary
1226 * handlers as everything is already set up. Even for
1227 * interrupts marked with IRQF_NO_BALANCE this is
1228 * correct as we want the thread to move to the cpu(s)
1229 * on which the requesting code placed the interrupt.
1231 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1236 * Internal function to register an irqaction - typically used to
1237 * allocate special interrupts that are part of the architecture.
1241 * desc->request_mutex Provides serialization against a concurrent free_irq()
1242 * chip_bus_lock Provides serialization for slow bus operations
1243 * desc->lock Provides serialization against hard interrupts
1245 * chip_bus_lock and desc->lock are sufficient for all other management and
1246 * interrupt related functions. desc->request_mutex solely serializes
1247 * request/free_irq().
1250 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1252 struct irqaction *old, **old_ptr;
1253 unsigned long flags, thread_mask = 0;
1254 int ret, nested, shared = 0;
1259 if (desc->irq_data.chip == &no_irq_chip)
1261 if (!try_module_get(desc->owner))
1267 * If the trigger type is not specified by the caller,
1268 * then use the default for this interrupt.
1270 if (!(new->flags & IRQF_TRIGGER_MASK))
1271 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1274 * Check whether the interrupt nests into another interrupt
1277 nested = irq_settings_is_nested_thread(desc);
1279 if (!new->thread_fn) {
1284 * Replace the primary handler which was provided from
1285 * the driver for non nested interrupt handling by the
1286 * dummy function which warns when called.
1288 new->handler = irq_nested_primary_handler;
1290 if (irq_settings_can_thread(desc)) {
1291 ret = irq_setup_forced_threading(new);
1298 * Create a handler thread when a thread function is supplied
1299 * and the interrupt does not nest into another interrupt
1302 if (new->thread_fn && !nested) {
1303 ret = setup_irq_thread(new, irq, false);
1306 if (new->secondary) {
1307 ret = setup_irq_thread(new->secondary, irq, true);
1314 * Drivers are often written to work w/o knowledge about the
1315 * underlying irq chip implementation, so a request for a
1316 * threaded irq without a primary hard irq context handler
1317 * requires the ONESHOT flag to be set. Some irq chips like
1318 * MSI based interrupts are per se one shot safe. Check the
1319 * chip flags, so we can avoid the unmask dance at the end of
1320 * the threaded handler for those.
1322 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1323 new->flags &= ~IRQF_ONESHOT;
1326 * Protects against a concurrent __free_irq() call which might wait
1327 * for synchronize_hardirq() to complete without holding the optional
1328 * chip bus lock and desc->lock. Also protects against handing out
1329 * a recycled oneshot thread_mask bit while it's still in use by
1330 * its previous owner.
1332 mutex_lock(&desc->request_mutex);
1335 * Acquire bus lock as the irq_request_resources() callback below
1336 * might rely on the serialization or the magic power management
1337 * functions which are abusing the irq_bus_lock() callback.
1339 chip_bus_lock(desc);
1341 /* First installed action requests resources. */
1342 if (!desc->action) {
1343 ret = irq_request_resources(desc);
1345 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1346 new->name, irq, desc->irq_data.chip->name);
1347 goto out_bus_unlock;
1352 * The following block of code has to be executed atomically
1353 * protected against a concurrent interrupt and any of the other
1354 * management calls which are not serialized via
1355 * desc->request_mutex or the optional bus lock.
1357 raw_spin_lock_irqsave(&desc->lock, flags);
1358 old_ptr = &desc->action;
1362 * Can't share interrupts unless both agree to and are
1363 * the same type (level, edge, polarity). So both flag
1364 * fields must have IRQF_SHARED set and the bits which
1365 * set the trigger type must match. Also all must
1368 unsigned int oldtype;
1371 * If nobody did set the configuration before, inherit
1372 * the one provided by the requester.
1374 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1375 oldtype = irqd_get_trigger_type(&desc->irq_data);
1377 oldtype = new->flags & IRQF_TRIGGER_MASK;
1378 irqd_set_trigger_type(&desc->irq_data, oldtype);
1381 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1382 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1383 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1386 /* All handlers must agree on per-cpuness */
1387 if ((old->flags & IRQF_PERCPU) !=
1388 (new->flags & IRQF_PERCPU))
1391 /* add new interrupt at end of irq queue */
1394 * Or all existing action->thread_mask bits,
1395 * so we can find the next zero bit for this irqaction.
1398 thread_mask |= old->thread_mask;
1399 old_ptr = &old->next;
1406 * Setup the thread mask for this irqaction for ONESHOT. For
1407 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1408 * conditional in irq_wake_thread().
1410 if (new->flags & IRQF_ONESHOT) {
1412 * Unlikely to have 32 resp. 64 irqs sharing one line, but who knows.
1415 if (thread_mask == ~0UL) {
1420 * The thread_mask for the action is or'ed to
1421 * desc->thread_active to indicate that the
1422 * IRQF_ONESHOT thread handler has been woken, but not
1423 * yet finished. The bit is cleared when a thread
1424 * completes. When all threads of a shared interrupt
1425 * line have completed desc->threads_active becomes
1426 * zero and the interrupt line is unmasked. See
1427 * handle.c:irq_wake_thread() for further information.
1429 * If no thread is woken by primary (hard irq context)
1430 * interrupt handlers, then desc->threads_active is
1431 * also checked for zero to unmask the irq line in the
1432 * affected hard irq flow handlers
1433 * (handle_[fasteoi|level]_irq).
1435 * The new action gets the first zero bit of
1436 * thread_mask assigned. See the loop above which or's
1437 * all existing action->thread_mask bits.
1439 new->thread_mask = 1UL << ffz(thread_mask);
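/*
 * Worked example (illustrative, not from the original source): with two
 * ONESHOT actions already installed, thread_mask == 0x3, ffz() returns
 * 2 and this action gets thread_mask 0x4. desc->threads_oneshot
 * dropping back to zero is what allows the line to be unmasked.
 */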
1441 } else if (new->handler == irq_default_primary_handler &&
1442 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1444 * The interrupt was requested with handler = NULL, so
1445 * we use the default primary handler for it. But it
1446 * does not have the oneshot flag set. In combination
1447 * with level interrupts this is deadly, because the
1448 * default primary handler just wakes the thread, then
1449 * the irq line is re-enabled, but the device still
1450 * has the level irq asserted. Rinse and repeat....
1452 * While this works for edge type interrupts, we play
1453 * it safe and reject unconditionally because we can't
1454 * say for sure which type this interrupt really
1455 * has. The type flags are unreliable as the
1456 * underlying chip implementation can override them.
1458 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1465 init_waitqueue_head(&desc->wait_for_threads);
1467 /* Setup the type (level, edge polarity) if configured: */
1468 if (new->flags & IRQF_TRIGGER_MASK) {
1469 ret = __irq_set_trigger(desc,
1470 new->flags & IRQF_TRIGGER_MASK);
1477 * Activate the interrupt. That activation must happen
1478 * independently of IRQ_NOAUTOEN. request_irq() can fail
1479 * and the callers are supposed to handle
1480 * that. enable_irq() of an interrupt requested with
1481 * IRQ_NOAUTOEN is not supposed to fail. The activation
1482 * keeps it in shutdown mode, it merely associates
1483 * resources if necessary and if that's not possible it
1484 * fails. Interrupts which are in managed shutdown mode
1485 * will simply ignore that activation request.
1487 ret = irq_activate(desc);
1491 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1492 IRQS_ONESHOT | IRQS_WAITING);
1493 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1495 if (new->flags & IRQF_PERCPU) {
1496 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1497 irq_settings_set_per_cpu(desc);
1500 if (new->flags & IRQF_ONESHOT)
1501 desc->istate |= IRQS_ONESHOT;
1503 /* Exclude IRQ from balancing if requested */
1504 if (new->flags & IRQF_NOBALANCING) {
1505 irq_settings_set_no_balancing(desc);
1506 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1509 if (irq_settings_can_autoenable(desc)) {
1510 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1513 * Shared interrupts do not go well with disabling
1514 * auto enable. The sharing interrupt might request
1515 * it while it's still disabled and then wait for
1516 * interrupts forever.
1518 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1519 /* Undo nested disables: */
1523 } else if (new->flags & IRQF_TRIGGER_MASK) {
1524 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1525 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1528 /* hope the handler works with current trigger mode */
1529 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1535 irq_pm_install_action(desc, new);
1537 /* Reset broken irq detection when installing new handler */
1538 desc->irq_count = 0;
1539 desc->irqs_unhandled = 0;
1542 * Check whether we disabled the irq via the spurious handler
1543 * before. Reenable it and give it another chance.
1545 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1546 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1550 raw_spin_unlock_irqrestore(&desc->lock, flags);
1551 chip_bus_sync_unlock(desc);
1552 mutex_unlock(&desc->request_mutex);
1554 irq_setup_timings(desc, new);
1557 * Strictly no need to wake it up, but hung_task complains
1558 * when no hard interrupt wakes the thread up.
1561 wake_up_process(new->thread);
1563 wake_up_process(new->secondary->thread);
1565 register_irq_proc(irq, desc);
1567 register_handler_proc(irq, new);
1571 if (!(new->flags & IRQF_PROBE_SHARED)) {
1572 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1573 irq, new->flags, new->name, old->flags, old->name);
1574 #ifdef CONFIG_DEBUG_SHIRQ
1581 raw_spin_unlock_irqrestore(&desc->lock, flags);
1584 irq_release_resources(desc);
1586 chip_bus_sync_unlock(desc);
1587 mutex_unlock(&desc->request_mutex);
1591 struct task_struct *t = new->thread;
1597 if (new->secondary && new->secondary->thread) {
1598 struct task_struct *t = new->secondary->thread;
1600 new->secondary->thread = NULL;
1605 module_put(desc->owner);
1610 * setup_irq - setup an interrupt
1611 * @irq: Interrupt line to setup
1612 * @act: irqaction for the interrupt
1614 * Used to statically setup interrupts in the early boot process.
1616 int setup_irq(unsigned int irq, struct irqaction *act)
1619 struct irq_desc *desc = irq_to_desc(irq);
1621 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1624 retval = irq_chip_pm_get(&desc->irq_data);
1628 retval = __setup_irq(irq, desc, act);
1631 irq_chip_pm_put(&desc->irq_data);
1635 EXPORT_SYMBOL_GPL(setup_irq);
1638 * Internal function to unregister an irqaction - used to free
1639 * regular and special interrupts that are part of the architecture.
1641 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1643 unsigned irq = desc->irq_data.irq;
1644 struct irqaction *action, **action_ptr;
1645 unsigned long flags;
1647 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1649 mutex_lock(&desc->request_mutex);
1650 chip_bus_lock(desc);
1651 raw_spin_lock_irqsave(&desc->lock, flags);
1654 * There can be multiple actions per IRQ descriptor, find the right
1655 * one based on the dev_id:
1657 action_ptr = &desc->action;
1659 action = *action_ptr;
1662 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1663 raw_spin_unlock_irqrestore(&desc->lock, flags);
1664 chip_bus_sync_unlock(desc);
1665 mutex_unlock(&desc->request_mutex);
1669 if (action->dev_id == dev_id)
1671 action_ptr = &action->next;
1674 /* Found it - now remove it from the list of entries: */
1675 *action_ptr = action->next;
1677 irq_pm_remove_action(desc, action);
1679 /* If this was the last handler, shut down the IRQ line: */
1680 if (!desc->action) {
1681 irq_settings_clr_disable_unlazy(desc);
1682 /* Only shutdown. Deactivate after synchronize_hardirq() */
1687 /* make sure affinity_hint is cleaned up */
1688 if (WARN_ON_ONCE(desc->affinity_hint))
1689 desc->affinity_hint = NULL;
1692 raw_spin_unlock_irqrestore(&desc->lock, flags);
1694 * Drop bus_lock here so the changes which were done in the chip
1695 * callbacks above are synced out to the irq chips which hang
1696 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1698 * Aside of that the bus_lock can also be taken from the threaded
1699 * handler in irq_finalize_oneshot() which results in a deadlock
1700 * because kthread_stop() would wait forever for the thread to
1701 * complete, which is blocked on the bus lock.
1703 * The still-held desc->request_mutex protects against a
1704 * concurrent request_irq() of this irq so the release of resources
1705 * and timing data is properly serialized.
1707 chip_bus_sync_unlock(desc);
1709 unregister_handler_proc(irq, action);
1712 * Make sure it's not being used on another CPU and if the chip
1713 * supports it also make sure that there is no (not yet serviced)
1714 * interrupt in flight at the hardware level.
1716 __synchronize_hardirq(desc, true);
1718 #ifdef CONFIG_DEBUG_SHIRQ
1720 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1721 * event to happen even while it's being freed, so let's make sure that
1722 * is so by doing an extra call to the handler ....
1724 * ( We do this after actually deregistering it, to make sure that a
1725 * 'real' IRQ doesn't run in parallel with our fake. )
1727 if (action->flags & IRQF_SHARED) {
1728 local_irq_save(flags);
1729 action->handler(irq, dev_id);
1730 local_irq_restore(flags);
1735 * The action has already been removed above, but the thread writes
1736 * its oneshot mask bit when it completes. Though request_mutex is
1737 * held across this which prevents __setup_irq() from handing out
1738 * the same bit to a newly requested action.
1740 if (action->thread) {
1741 kthread_stop(action->thread);
1742 put_task_struct(action->thread);
1743 if (action->secondary && action->secondary->thread) {
1744 kthread_stop(action->secondary->thread);
1745 put_task_struct(action->secondary->thread);
1749 /* Last action releases resources */
1750 if (!desc->action) {
1752 * Reacquire bus lock as irq_release_resources() might
1753 * require it to deallocate resources over the slow bus.
1755 chip_bus_lock(desc);
1757 * There is no interrupt on the fly anymore. Deactivate it completely.
1760 raw_spin_lock_irqsave(&desc->lock, flags);
1761 irq_domain_deactivate_irq(&desc->irq_data);
1762 raw_spin_unlock_irqrestore(&desc->lock, flags);
1764 irq_release_resources(desc);
1765 chip_bus_sync_unlock(desc);
1766 irq_remove_timings(desc);
1769 mutex_unlock(&desc->request_mutex);
1771 irq_chip_pm_put(&desc->irq_data);
1772 module_put(desc->owner);
1773 kfree(action->secondary);
1778 * remove_irq - free an interrupt
1779 * @irq: Interrupt line to free
1780 * @act: irqaction for the interrupt
1782 * Used to remove interrupts statically setup by the early boot process.
1784 void remove_irq(unsigned int irq, struct irqaction *act)
1786 struct irq_desc *desc = irq_to_desc(irq);
1788 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1789 __free_irq(desc, act->dev_id);
1791 EXPORT_SYMBOL_GPL(remove_irq);
1794 * free_irq - free an interrupt allocated with request_irq
1795 * @irq: Interrupt line to free
1796 * @dev_id: Device identity to free
1798 * Remove an interrupt handler. The handler is removed and if the
1799 * interrupt line is no longer in use by any driver it is disabled.
1800 * On a shared IRQ the caller must ensure the interrupt is disabled
1801 * on the card it drives before calling this function. The function
1802 * does not return until any executing interrupts for this IRQ have completed.
1805 * This function must not be called from interrupt context.
1807 * Returns the devname argument passed to request_irq.
1809 const void *free_irq(unsigned int irq, void *dev_id)
1811 struct irq_desc *desc = irq_to_desc(irq);
1812 struct irqaction *action;
1813 const char *devname;
1815 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1819 if (WARN_ON(desc->affinity_notify))
1820 desc->affinity_notify = NULL;
1823 action = __free_irq(desc, dev_id);
1828 devname = action->name;
1832 EXPORT_SYMBOL(free_irq);
1835 * request_threaded_irq - allocate an interrupt line
1836 * @irq: Interrupt line to allocate
1837 * @handler: Function to be called when the IRQ occurs.
1838 * Primary handler for threaded interrupts
1839 * If NULL and thread_fn != NULL the default
1840 * primary handler is installed
1841 * @thread_fn: Function called from the irq handler thread
1842 * If NULL, no irq thread is created
1843 * @irqflags: Interrupt type flags
1844 * @devname: An ascii name for the claiming device
1845 * @dev_id: A cookie passed back to the handler function
1847 * This call allocates interrupt resources and enables the
1848 * interrupt line and IRQ handling. From the point this
1849 * call is made your handler function may be invoked. Since
1850 * your handler function must clear any interrupt the board
1851 * raises, you must take care both to initialise your hardware
1852 * and to set up the interrupt handler in the right order.
1854 * If you want to set up a threaded irq handler for your device
1855 * then you need to supply @handler and @thread_fn. @handler is
1856 * still called in hard interrupt context and has to check
1857 * whether the interrupt originates from the device. If yes it
1858 * needs to disable the interrupt on the device and return
1859 * IRQ_WAKE_THREAD which will wake up the handler thread and run
1860 * @thread_fn. This split handler design is necessary to support
1861 * shared interrupts.
1863 * Dev_id must be globally unique. Normally the address of the
1864 * device data structure is used as the cookie. Since the handler
1865 * receives this value it makes sense to use it.
1867 * If your interrupt is shared you must pass a non NULL dev_id
1868 * as this is required when freeing the interrupt.
1872 * IRQF_SHARED Interrupt is shared
1873 * IRQF_TRIGGER_* Specify active edge(s) or level
1876 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1877 irq_handler_t thread_fn, unsigned long irqflags,
1878 const char *devname, void *dev_id)
1880 struct irqaction *action;
1881 struct irq_desc *desc;
1884 if (irq == IRQ_NOTCONNECTED)
1888 * Sanity-check: shared interrupts must pass in a real dev-ID,
1889 * otherwise we'll have trouble later trying to figure out
1890 * which interrupt is which (messes up the interrupt freeing logic etc).
1893 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1894 * it cannot be set along with IRQF_NO_SUSPEND.
1896 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1897 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1898 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1901 desc = irq_to_desc(irq);
1905 if (!irq_settings_can_request(desc) ||
1906 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1912 handler = irq_default_primary_handler;
1915 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1919 action->handler = handler;
1920 action->thread_fn = thread_fn;
1921 action->flags = irqflags;
1922 action->name = devname;
1923 action->dev_id = dev_id;
1925 retval = irq_chip_pm_get(&desc->irq_data);
1931 retval = __setup_irq(irq, desc, action);
1934 irq_chip_pm_put(&desc->irq_data);
1935 kfree(action->secondary);
1939 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1940 if (!retval && (irqflags & IRQF_SHARED)) {
1942 * It's a shared IRQ -- the driver ought to be prepared for it
1943 * to happen immediately, so let's make sure....
1944 * We disable the irq to make sure that a 'real' IRQ doesn't
1945 * run in parallel with our fake.
1947 unsigned long flags;
1950 local_irq_save(flags);
1952 handler(irq, dev_id);
1954 local_irq_restore(flags);
1960 EXPORT_SYMBOL(request_threaded_irq);
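/*
 * Illustrative sketch of the split-handler design described above; the
 * device, helpers and flags shown are hypothetical:
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_is_mine(dev))
 *			return IRQ_NONE;	// shared line, not ours
 *		my_dev_mask_irq(dev);		// quiet the device
 *		return IRQ_WAKE_THREAD;		// defer the slow work
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev_id)
 *	{
 *		// may sleep: talk to the device over a slow bus etc.
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_slow_work,
 *				   IRQF_SHARED, "my_dev", dev);
 */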
1963 * request_any_context_irq - allocate an interrupt line
1964 * @irq: Interrupt line to allocate
1965 * @handler: Function to be called when the IRQ occurs.
1966 * Threaded handler for threaded interrupts.
1967 * @flags: Interrupt type flags
1968 * @name: An ascii name for the claiming device
1969 * @dev_id: A cookie passed back to the handler function
1971 * This call allocates interrupt resources and enables the
1972 * interrupt line and IRQ handling. It selects either a
1973 * hardirq or threaded handling method depending on the
1976 * On failure, it returns a negative value. On success,
1977 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1979 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1980 unsigned long flags, const char *name, void *dev_id)
1982 struct irq_desc *desc;
1985 if (irq == IRQ_NOTCONNECTED)
1988 desc = irq_to_desc(irq);
1992 if (irq_settings_is_nested_thread(desc)) {
1993 ret = request_threaded_irq(irq, NULL, handler,
1994 flags, name, dev_id);
1995 return !ret ? IRQC_IS_NESTED : ret;
1998 ret = request_irq(irq, handler, flags, name, dev_id);
1999 return !ret ? IRQC_IS_HARDIRQ : ret;
2001 EXPORT_SYMBOL_GPL(request_any_context_irq);
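/*
 * Illustrative sketch (hypothetical names): callers that care about the
 * resulting handling context can inspect the positive return value:
 *
 *	ret = request_any_context_irq(irq, my_handler, 0, "my_dev", dev);
 *	if (ret < 0)
 *		return ret;
 *	dev->irq_is_nested = (ret == IRQC_IS_NESTED);
 */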
2003 void enable_percpu_irq(unsigned int irq, unsigned int type)
2005 unsigned int cpu = smp_processor_id();
2006 unsigned long flags;
2007 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2013 * If the trigger type is not specified by the caller, then
2014 * use the default for this interrupt.
2016 type &= IRQ_TYPE_SENSE_MASK;
2017 if (type == IRQ_TYPE_NONE)
2018 type = irqd_get_trigger_type(&desc->irq_data);
2020 if (type != IRQ_TYPE_NONE) {
2023 ret = __irq_set_trigger(desc, type);
2026 WARN(1, "failed to set type for IRQ%d\n", irq);
2031 irq_percpu_enable(desc, cpu);
2033 irq_put_desc_unlock(desc, flags);
2035 EXPORT_SYMBOL_GPL(enable_percpu_irq);
2038 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2039 * @irq: Linux irq number to check for
2041 * Must be called from a non-migratable context. Returns the enable
2042 * state of a per cpu interrupt on the current cpu.
2044 bool irq_percpu_is_enabled(unsigned int irq)
2046 unsigned int cpu = smp_processor_id();
2047 struct irq_desc *desc;
2048 unsigned long flags;
2051 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2055 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2056 irq_put_desc_unlock(desc, flags);
2060 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2062 void disable_percpu_irq(unsigned int irq)
2064 unsigned int cpu = smp_processor_id();
2065 unsigned long flags;
2066 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2071 irq_percpu_disable(desc, cpu);
2072 irq_put_desc_unlock(desc, flags);
2074 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2077 * Internal function to unregister a percpu irqaction.
2079 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2081 struct irq_desc *desc = irq_to_desc(irq);
2082 struct irqaction *action;
2083 unsigned long flags;
2085 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2090 raw_spin_lock_irqsave(&desc->lock, flags);
2092 action = desc->action;
2093 if (!action || action->percpu_dev_id != dev_id) {
2094 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2098 if (!cpumask_empty(desc->percpu_enabled)) {
2099 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2100 irq, cpumask_first(desc->percpu_enabled));
2104 /* Found it - now remove it from the list of entries: */
2105 desc->action = NULL;
2107 raw_spin_unlock_irqrestore(&desc->lock, flags);
2109 unregister_handler_proc(irq, action);
2111 irq_chip_pm_put(&desc->irq_data);
2112 module_put(desc->owner);
2116 raw_spin_unlock_irqrestore(&desc->lock, flags);
2121 * remove_percpu_irq - free a per-cpu interrupt
2122 * @irq: Interrupt line to free
2123 * @act: irqaction for the interrupt
2125 * Used to remove interrupts statically setup by the early boot process.
2127 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2129 struct irq_desc *desc = irq_to_desc(irq);
2131 if (desc && irq_settings_is_per_cpu_devid(desc))
2132 __free_percpu_irq(irq, act->percpu_dev_id);
2136 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2137 * @irq: Interrupt line to free
2138 * @dev_id: Device identity to free
2140 * Remove a percpu interrupt handler. The handler is removed, but
2141 * the interrupt line is not disabled. This must be done on each
2142 * CPU before calling this function. The function does not return
2143 * until any executing interrupts for this IRQ have completed.
2145 * This function must not be called from interrupt context.
2147 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2149 struct irq_desc *desc = irq_to_desc(irq);
2151 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2154 chip_bus_lock(desc);
2155 kfree(__free_percpu_irq(irq, dev_id));
2156 chip_bus_sync_unlock(desc);
2158 EXPORT_SYMBOL_GPL(free_percpu_irq);
2161 * setup_percpu_irq - setup a per-cpu interrupt
2162 * @irq: Interrupt line to setup
2163 * @act: irqaction for the interrupt
2165 * Used to statically setup per-cpu interrupts in the early boot process.
2167 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2169 struct irq_desc *desc = irq_to_desc(irq);
2172 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2175 retval = irq_chip_pm_get(&desc->irq_data);
2179 retval = __setup_irq(irq, desc, act);
2182 irq_chip_pm_put(&desc->irq_data);
2188 * __request_percpu_irq - allocate a percpu interrupt line
2189 * @irq: Interrupt line to allocate
2190 * @handler: Function to be called when the IRQ occurs.
2191 * @flags: Interrupt type flags (IRQF_TIMER only)
2192 * @devname: An ascii name for the claiming device
2193 * @dev_id: A percpu cookie passed back to the handler function
2195 * This call allocates interrupt resources and enables the
2196 * interrupt on the local CPU. If the interrupt is supposed to be
2197 * enabled on other CPUs, it has to be done on each CPU using
2198 * enable_percpu_irq().
2200 * Dev_id must be globally unique. It is a per-cpu variable, and
2201 * the handler gets called with the interrupted CPU's instance of that variable.
2204 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2205 unsigned long flags, const char *devname,
2206 void __percpu *dev_id)
2208 struct irqaction *action;
2209 struct irq_desc *desc;
2215 desc = irq_to_desc(irq);
2216 if (!desc || !irq_settings_can_request(desc) ||
2217 !irq_settings_is_per_cpu_devid(desc))
2220 if (flags && flags != IRQF_TIMER)
2223 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2227 action->handler = handler;
2228 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2229 action->name = devname;
2230 action->percpu_dev_id = dev_id;
2232 retval = irq_chip_pm_get(&desc->irq_data);
2238 retval = __setup_irq(irq, desc, action);
2241 irq_chip_pm_put(&desc->irq_data);
2247 EXPORT_SYMBOL_GPL(__request_percpu_irq);
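/*
 * Illustrative sketch (hypothetical per-cpu timer style driver): request
 * once with a percpu cookie via the request_percpu_irq() wrapper, then
 * enable on each CPU that should receive the interrupt, e.g. from a
 * hotplug callback.
 *
 *	static DEFINE_PER_CPU(struct my_cpu_state, my_state);
 *
 *	ret = request_percpu_irq(irq, my_percpu_handler, "my_timer",
 *				 &my_state);
 *	...
 *	// on each target CPU:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */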
2249 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2252 struct irq_chip *chip;
2256 chip = irq_data_get_irq_chip(data);
2257 if (chip->irq_get_irqchip_state)
2259 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2260 data = data->parent_data;
2267 err = chip->irq_get_irqchip_state(data, which, state);
2272 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2273 * @irq: Interrupt line that is forwarded to a VM
2274 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2275 * @state: a pointer to a boolean where the state is to be stored
2277 * This call snapshots the internal irqchip state of an
2278 * interrupt, returning into @state the bit corresponding to state @which.
2281 * This function should be called with preemption disabled if the
2282 * interrupt controller has per-cpu registers.
2284 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2287 struct irq_desc *desc;
2288 struct irq_data *data;
2289 unsigned long flags;
2292 desc = irq_get_desc_buslock(irq, &flags, 0);
2296 data = irq_desc_get_irq_data(desc);
2298 err = __irq_get_irqchip_state(data, which, state);
2300 irq_put_desc_busunlock(desc, flags);
2303 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
2306 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2307 * @irq: Interrupt line that is forwarded to a VM
2308 * @which: State to be restored (one of IRQCHIP_STATE_*)
2309 * @val: Value corresponding to @which
2311 * This call sets the internal irqchip state of an interrupt,
2312 * depending on the value of @which.
2314 * This function should be called with preemption disabled if the
2315 * interrupt controller has per-cpu registers.
2317 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2320 struct irq_desc *desc;
2321 struct irq_data *data;
2322 struct irq_chip *chip;
2323 unsigned long flags;
2326 desc = irq_get_desc_buslock(irq, &flags, 0);
2330 data = irq_desc_get_irq_data(desc);
2333 chip = irq_data_get_irq_chip(data);
2334 if (chip->irq_set_irqchip_state)
2336 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2337 data = data->parent_data;
2344 err = chip->irq_set_irqchip_state(data, which, val);
2346 irq_put_desc_busunlock(desc, flags);
2349 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
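/*
 * Illustrative sketch of the get/set pair for a line forwarded to a VM
 * (the use case named in the kernel-doc above); "irq" is hypothetical:
 *
 *	bool pending;
 *
 *	// snapshot the state on the host side...
 *	irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	// ...and replay it when the interrupt is handed back
 *	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 */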