// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <linux/time-internal.h>

extern void free_irqs(void);
/* When epoll triggers we do not know why it did so
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type
 */
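/*
 * For illustration only (not code from this file): a hypothetical driver
 * that wants separate read and write interrupts on one descriptor registers
 * both IRQ types against the same fd and ends up with two populated slots
 * in a single entry below:
 *
 *	err = um_request_irq(MY_RX_IRQ, fd, IRQ_READ, rx_handler,
 *			     0, "mydev-rx", dev);
 *	err = um_request_irq(MY_TX_IRQ, fd, IRQ_WRITE, tx_handler,
 *			     0, "mydev-tx", dev);
 *
 * MY_RX_IRQ, MY_TX_IRQ, rx_handler, tx_handler and dev are made-up names;
 * um_request_irq() and the IRQ_READ/IRQ_WRITE types are the real API used
 * throughout this file.
 */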
	/* it's cheaper to store this than to query it */
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	bool pending_on_resume;
	void (*timetravel_handler)(int, int, void *,
				   struct time_travel_event *);
	struct time_travel_event event;

	struct list_head list;
	struct irq_reg reg[NUM_IRQ_TYPES];
	bool sigio_workaround;

static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */
			do_IRQ(irq->irq, regs);
		} while (irq->pending);
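/*
 * irq_io_loop() is built around this guard pattern - a sketch, not a
 * verbatim copy of the code above:
 *
 *	if (irq->active) {
 *		irq->active = false;		// close the door behind us
 *		do {
 *			irq->pending = false;
 *			do_IRQ(irq->irq, regs);
 *		} while (irq->pending);		// nested SIGIO set pending? run again
 *		irq->active = true;
 *	} else {
 *		irq->pending = true;		// a handler is already running
 *	}
 *
 * so a SIGIO arriving while do_IRQ() is still running only marks the request
 * and is picked up by the loop instead of re-entering the handler.
 */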
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
	struct irq_reg *reg = container_of(ev, struct irq_reg, event);

	/* do nothing if suspended - just to cause a wakeup */

	generic_handle_irq(reg->irq);

static bool irq_do_timetravel_handler(struct irq_entry *entry,
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->timetravel_handler)

	/*
	 * Handle all messages - we might get multiple even while
	 * interrupts are already suspended, due to suspend order
	 * etc. Note that time_travel_add_irq_event() will not add
	 * an event twice, if it's pending already "first wins".
	 */
	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

	if (!reg->event.pending)

		reg->pending_on_resume = true;
static bool irq_do_timetravel_handler(struct irq_entry *entry,

static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
			      struct uml_pt_regs *regs,
			      bool timetravel_handlers_only)
	struct irq_reg *reg = &entry->reg[t];

	if (os_epoll_triggered(idx, reg->events) <= 0)

	if (irq_do_timetravel_handler(entry, t))

	/*
	 * If we're called to only run time-travel handlers then don't
	 * actually proceed but mark sigio as pending (if applicable).
	 * For suspend/resume, timetravel_handlers_only may be true
	 * despite time-travel not being configured and used.
	 */
	if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
		mark_sigio_pending();

	irq_io_loop(reg, regs);
static void _sigio_handler(struct uml_pt_regs *regs,
			   bool timetravel_handlers_only)
	struct irq_entry *irq_entry;

	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())

		/* This is now lockless - epoll keeps back-references to the irqs
		 * which have triggered it so there is no need to walk the irq
		 * list and lock it every time. We avoid locking by turning off
		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
		 * we do any changes to the actual data structures
		 */
		n = os_waiting_for_events_epoll();
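		/*
		 * Illustration (a sketch, not code from this file) of the
		 * ordering that keeps this read side safe: writers always
		 * detach the fd from epoll before touching the entry, as
		 * free_irq_entry() does:
		 *
		 *	os_del_epoll_fd(entry->fd);	// epoll stops returning the pointer
		 *	...				// only then modify or free the entry
		 *
		 * so this loop is never handed a back-reference to an entry
		 * that is concurrently being freed.
		 */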
		for (i = 0; i < n; i++) {
			irq_entry = os_epoll_get_data_pointer(i);

			for (t = 0; t < NUM_IRQ_TYPES; t++)
				sigio_reg_handler(i, irq_entry, t, regs,
						  timetravel_handlers_only);

	if (!timetravel_handlers_only)

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
	_sigio_handler(regs, irqs_suspended);
static struct irq_entry *get_irq_entry_by_fd(int fd)
	struct irq_entry *walk;

	lockdep_assert_held(&irq_lock);

	list_for_each_entry(walk, &active_fds, list) {

static void free_irq_entry(struct irq_entry *to_free, bool remove)
		os_del_epoll_fd(to_free->fd);
	list_del(&to_free->list);

static bool update_irq_entry(struct irq_entry *entry)
	for (i = 0; i < NUM_IRQ_TYPES; i++)
		events |= entry->reg[i].events;

		/* will modify (instead of add) if needed */
		os_add_epoll_fd(events, entry->fd, entry);

	os_del_epoll_fd(entry->fd);

static void update_or_free_irq_entry(struct irq_entry *entry)
	if (!update_irq_entry(entry))
		free_irq_entry(entry, false);
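/*
 * Example of the resulting behaviour (MY_RX_IRQ/MY_TX_IRQ are hypothetical):
 * clearing one slot keeps the fd registered with the remaining events,
 * clearing the last one drops it from epoll and frees the entry:
 *
 *	deactivate_fd(fd, MY_RX_IRQ);	// IRQ_WRITE slot still set -> fd re-added
 *	deactivate_fd(fd, MY_TX_IRQ);	// last slot cleared -> entry freed
 */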
static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
		       void (*timetravel_handler)(int, int, void *,
						  struct time_travel_event *))
	struct irq_entry *irq_entry;
	int err, events = os_event_mask(type);

	err = os_set_fd_async(fd);

	spin_lock_irqsave(&irq_lock, flags);
	irq_entry = get_irq_entry_by_fd(fd);

		/* cannot register the same FD twice with the same type */
		if (WARN_ON(irq_entry->reg[type].events)) {

		/* temporarily disable to avoid IRQ-side locking */

		irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);

		list_add_tail(&irq_entry->list, &active_fds);
		maybe_sigio_broken(fd);

	irq_entry->reg[type].id = dev_id;
	irq_entry->reg[type].irq = irq;
	irq_entry->reg[type].active = true;
	irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	if (um_irq_timetravel_handler_used()) {
		irq_entry->reg[type].timetravel_handler = timetravel_handler;
		irq_entry->reg[type].event.fn = irq_event_handler;

	WARN_ON(!update_irq_entry(irq_entry));
	spin_unlock_irqrestore(&irq_lock, flags);

	spin_unlock_irqrestore(&irq_lock, flags);
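/*
 * Note (illustrative, hypothetical caller): registering the same fd twice
 * with the same type trips the WARN_ON() above and the second call is
 * rejected, e.g.:
 *
 *	um_request_irq(IRQ_A, fd, IRQ_READ, h1, 0, "a", d1);	// ok
 *	um_request_irq(IRQ_B, fd, IRQ_READ, h2, 0, "b", d2);	// rejected
 *
 * whereas one IRQ_READ plus one IRQ_WRITE registration on the same fd is
 * the supported combination.
 */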
/*
 * Remove the entry or entries for a specific FD, if you
 * don't want to remove all the possible entries then use
 * um_free_irq() or deactivate_fd() instead.
 */
void free_irq_by_fd(int fd)
	struct irq_entry *to_free;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	free_irq_entry(to_free, true);
	spin_unlock_irqrestore(&irq_lock, flags);
EXPORT_SYMBOL(free_irq_by_fd);
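/*
 * Usage sketch for the three removal paths mentioned above (MY_IRQ, fd and
 * dev are hypothetical):
 *
 *	um_free_irq(MY_IRQ, dev);	// one irq/dev pair, clears its allocation bit too
 *	deactivate_fd(fd, MY_IRQ);	// clear one slot of the fd, keep the rest
 *	free_irq_by_fd(fd);		// drop every registration for the fd
 */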
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
	struct irq_entry *entry;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		for (i = 0; i < NUM_IRQ_TYPES; i++) {
			struct irq_reg *reg = &entry->reg[i];

			os_del_epoll_fd(entry->fd);
			update_or_free_irq_entry(entry);

	spin_unlock_irqrestore(&irq_lock, flags);

void deactivate_fd(int fd, int irqnum)
	struct irq_entry *entry;

	spin_lock_irqsave(&irq_lock, flags);
	entry = get_irq_entry_by_fd(fd);

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		if (!entry->reg[i].events)
		if (entry->reg[i].irq == irqnum)
			entry->reg[i].events = 0;

	update_or_free_irq_entry(entry);
	spin_unlock_irqrestore(&irq_lock, flags);
EXPORT_SYMBOL(deactivate_fd);
/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting. No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
	struct irq_entry *entry;

	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */

	/* we can no longer call kfree() here so just deactivate */
	list_for_each_entry(entry, &active_fds, list)
		os_del_epoll_fd(entry->fd);
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	generic_handle_irq(irq);

	set_irq_regs(old_regs);

void um_free_irq(int irq, void *dev)
	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
		 "freeing invalid irq %d", irq))

	free_irq_by_irq_and_dev(irq, dev);

	clear_bit(irq, irqs_allocated);
EXPORT_SYMBOL(um_free_irq);
_um_request_irq(int irq, int fd, enum um_irq_type type,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id,
		void (*timetravel_handler)(int, int, void *,
					   struct time_travel_event *))
	if (irq == UM_IRQ_ALLOC) {
		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
			if (!test_and_set_bit(i, irqs_allocated)) {

		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);

	err = request_irq(irq, handler, irqflags, devname, dev_id);

	clear_bit(irq, irqs_allocated);

int um_request_irq(int irq, int fd, enum um_irq_type type,
		   irq_handler_t handler, unsigned long irqflags,
		   const char *devname, void *dev_id)
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, NULL);
EXPORT_SYMBOL(um_request_irq);
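/*
 * Example (hypothetical caller): passing UM_IRQ_ALLOC lets the code above
 * pick a free dynamic IRQ number, which is then the return value:
 *
 *	irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, my_handler,
 *			     0, "mydev", dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	um_free_irq(irq, dev);		// releases the fd registration and the bit
 */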
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
		      irq_handler_t handler, unsigned long irqflags,
		      const char *devname, void *dev_id,
		      void (*timetravel_handler)(int, int, void *,
						 struct time_travel_event *))
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, timetravel_handler);
EXPORT_SYMBOL(um_request_irq_tt);
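/*
 * Sketch of a time-travel aware registration (the mydev_* names are made
 * up): the extra handler only inspects the fd and schedules the interrupt
 * for the right simulated time instead of handling it immediately:
 *
 *	static void mydev_tt_handler(int irq, int fd, void *id,
 *				     struct time_travel_event *ev)
 *	{
 *		// read/queue the message here, then:
 *		time_travel_add_irq_event(ev);
 *	}
 *
 *	err = um_request_irq_tt(MY_IRQ, fd, IRQ_READ, mydev_handler, 0,
 *				"mydev", dev, mydev_tt_handler);
 */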
void sigio_run_timetravel_handlers(void)
	_sigio_handler(NULL, true);

#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
	struct irq_entry *entry;

	irqs_suspended = true;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)

			/*
			 * For the SIGIO_WRITE_IRQ, which is used to handle the
			 * SIGIO workaround thread, we need special handling:
			 * enable wake for it itself, but below we tell it about
			 * any FDs that should be suspended.
			 */
			if (entry->reg[t].wakeup ||
			    entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
			    || entry->reg[t].timetravel_handler

			entry->suspended = true;
			os_clear_fd_async(entry->fd);
			entry->sigio_workaround =
				!__ignore_sigio_fd(entry->fd);

	spin_unlock_irqrestore(&irq_lock, flags);
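/*
 * Example (hypothetical driver): an fd whose interrupt must stay armed
 * across PM suspend sets the wake flag on its IRQ, which ends up in
 * reg->wakeup via normal_irq_set_wake() below and makes the loop above
 * skip that fd:
 *
 *	err = enable_irq_wake(irq);	// before suspend
 *	...
 *	disable_irq_wake(irq);		// on the resume path
 */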
void um_irqs_resume(void)
	struct irq_entry *entry;

	local_irq_save(flags);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	/*
	 * We don't need to lock anything here since we're in resume
	 * and nothing else is running, but have disabled IRQs so we
	 * don't try anything else with the interrupt list from there.
	 */
	list_for_each_entry(entry, &active_fds, list) {
		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			struct irq_reg *reg = &entry->reg[t];

			if (reg->pending_on_resume) {
				generic_handle_irq(reg->irq);
				reg->pending_on_resume = false;

	spin_lock(&irq_lock);
	list_for_each_entry(entry, &active_fds, list) {
		if (entry->suspended) {
			int err = os_set_fd_async(entry->fd);

			WARN(err < 0, "os_set_fd_async returned %d\n", err);
			entry->suspended = false;

			if (entry->sigio_workaround) {
				err = __add_sigio_fd(entry->fd);
				WARN(err < 0, "__add_sigio_fd returned %d\n", err);
	spin_unlock_irqrestore(&irq_lock, flags);

	irqs_suspended = false;
	send_sigio_to_self();
static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
	struct irq_entry *entry;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)

			if (entry->reg[t].irq != d->irq)
			entry->reg[t].wakeup = on;

	spin_unlock_irqrestore(&irq_lock, flags);

#define normal_irq_set_wake NULL
/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.irq_disable = dummy,
	.irq_set_wake = normal_irq_set_wake,

static struct irq_chip alarm_irq_type = {
	.irq_disable = dummy,

void __init init_IRQ(void)
	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation. We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack. The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * What happens when two signals race each other? UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *	The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *	A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *	A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled. If the value is
 * non-zero, then there is stack setup in progress. In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one. As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit). This is the
 * nesting indicator. If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
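/*
 * Worked example of the race protocol described above (A_BIT and B_BIT are
 * illustrative bit values): signal A arrives first, signal B nests while A
 * is still copying the thread_info.
 *
 *	A: mask = xchg(&pending_mask, A_BIT);	// returns 0 -> A is the outer handler
 *	B: mask = xchg(&pending_mask, B_BIT);	// returns A_BIT -> setup in progress
 *	B: folds its bit in (pending_mask == A_BIT | B_BIT) and returns unhandled
 *	A: finishes setup, xchg(&pending_mask, 0) returns A_BIT | B_BIT,
 *	   which is merged into *mask_out so both signals get dispatched
 *	   on the IRQ stack
 */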
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
	struct thread_info *ti;
	unsigned long mask, old;

	mask = xchg(&pending_mask, *mask_out);
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in. So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
			mask = xchg(&pending_mask, old);
		} while (mask != old);
	ti = current_thread_info();
	nested = (ti->real_thread != NULL);

		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		ti->real_thread = tti;

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;

unsigned long from_irq_stack(int nested)
	struct thread_info *ti, *to;

	ti = current_thread_info();

	to = ti->real_thread;

	ti->real_thread = NULL;

	mask = xchg(&pending_mask, 0);