/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

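/*
 * Illustrative sketch (not part of this header's API surface): a driver
 * for a hypothetical device "foo" might request a threaded interrupt,
 * using IRQF_ONESHOT to keep the line masked until the thread has run:
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		return IRQ_WAKE_THREAD;	// defer the real work to the thread
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		// sleeping operations (e.g. bus transfers) are fine here
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */
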
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

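/*
 * Illustrative sketch: per-CPU interrupts take a __percpu cookie rather
 * than a plain pointer; "foo_pcpu" and the handler are hypothetical:
 *
 *	static DEFINE_PER_CPU(struct foo_data, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_pcpu);
 *
 * The handler then runs on each CPU that has enabled the interrupt via
 * enable_percpu_irq().
 */
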
extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

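/*
 * Illustrative sketch: with the device-managed variant the IRQ is freed
 * automatically when the (hypothetical) driver "foo" unbinds, so no
 * matching free_irq() call is needed:
 *
 *	ret = devm_request_irq(&pdev->dev, irq, foo_handler, IRQF_SHARED,
 *			       "foo", foo);
 *	if (ret)
 *		return ret;
 */
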
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * dependent on low-level hardirq control for correct
 * completion).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

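/*
 * Illustrative sketch: temporarily fence out a device's interrupt while
 * reconfiguring it; disable_irq() waits for a running handler to finish
 * before returning (foo_reprogram_hardware() is a hypothetical helper):
 *
 *	disable_irq(foo->irq);
 *	foo_reprogram_hardware(foo);
 *	enable_irq(foo->irq);
 */
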
extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change. This will be
 *			called in process context.
 * @release:		Function to be called on release. This will be
 *			called in process context. Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#define	IRQ_AFFINITY_MAX_SETS	4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};

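/*
 * Illustrative sketch: a PCI driver that wants one non-spread
 * config/admin vector ahead of its spread queue vectors could pass
 * (names hypothetical; pci_alloc_irq_vectors_affinity() is the usual
 * consumer of this structure):
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors = 1,	// vector 0 is not affinity-spread
 *	};
 *
 *	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
 *			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &affd);
 */
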
/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

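/*
 * Illustrative sketch: pin an interrupt to a single CPU (fails if that
 * CPU is offline):
 *
 *	ret = irq_set_affinity(irq, cpumask_of(2));
 */
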
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and which is the only irq-context user of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

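/*
 * Illustrative sketch: a driver whose interrupt should wake the system
 * typically toggles wake capability in its (hypothetical) suspend and
 * resume callbacks:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
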
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);

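/*
 * Illustrative sketch: peek at the pending state of an interrupt
 * without handling it:
 *
 *	bool pending;
 *
 *	ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!ret && pending)
 *		...;	// the line has an interrupt latched
 */
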
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads	(true)
# else
extern bool force_irqthreads;
# endif
#else
#define force_irqthreads	(0)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. F.e. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,	/* Unused, but kept as tools rely on the
				   numbering. Sigh! */
	RCU_SOFTIRQ,		/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */
struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: one
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET_OLD(name, _func)	\
struct tasklet_struct name = {			\
	.count = ATOMIC_INIT(0),		\
	.func = _func,				\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

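/*
 * Illustrative sketch: bind a tasklet to its (hypothetical) per-device
 * context at probe time, schedule it from the hardirq handler, and kill
 * it on removal:
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo *foo = (struct foo *)data;
 *		// bottom-half work, runs in softirq context
 *	}
 *
 *	tasklet_init(&foo->tasklet, foo_tasklet_fn, (unsigned long)foo);
 *
 *	// from the hardirq handler:
 *	tasklet_schedule(&foo->tasklet);
 *
 *	// at remove time, ensure it is neither queued nor running:
 *	tasklet_kill(&foo->tasklet);
 */
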
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

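/*
 * Illustrative sketch of the recipe above (the foo_*() register helpers
 * are hypothetical):
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_device_irq(foo);	// step 1
 *	irqs = probe_irq_on();		// step 3
 *	foo_trigger_irq(foo);		// step 4
 *	mdelay(10);			// step 5
 *	irq = probe_irq_off(irqs);	// step 6
 *	foo_ack_device_irq(foo);	// step 7
 *	if (irq <= 0)
 *		...;	// nothing seen, or more than one line fired
 */
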
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))
#define __softirq_entry  \
	__attribute__((__section__(".softirqentry.text")))

#endif