// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>

/* When epoll triggers we do not know why it did so
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type
 */
struct irq_reg {
	void *id;
	int irq;
	/* it's cheaper to store this than to query it */
	int events;
	bool active;
	bool pending;
	bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	bool pending_on_resume;
	void (*timetravel_handler)(int, int, void *,
				   struct time_travel_event *);
	struct time_travel_event event;
#endif
};

struct irq_entry {
	struct list_head list;
	int fd;
	struct irq_reg reg[NUM_IRQ_TYPES];
	bool suspended;
	bool sigio_workaround;
};
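
/*
 * Illustrative sketch (not part of the original file): a single fd can
 * carry both a read and a write IRQ, each occupying its own reg[] slot
 * of the shared irq_entry.  With hypothetical handlers rd_handler and
 * wr_handler:
 *
 *	rd_irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ,
 *				rd_handler, 0, "example-rd", dev);
 *	wr_irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_WRITE,
 *				wr_handler, 0, "example-wr", dev);
 *
 * Both registrations share one epoll registration for the fd but live
 * in reg[IRQ_READ] and reg[IRQ_WRITE] respectively.
 */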
static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
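
/*
 * irq_lock protects active_fds and the per-entry reg[] slots on the
 * registration and teardown paths; the SIGIO/epoll dispatch path is
 * lockless and relies on os_del_epoll_fd() being called before an
 * entry is changed (see _sigio_handler() below).  irqs_allocated
 * tracks dynamically allocated IRQ numbers and irqs_suspended is set
 * while interrupts are suspended for PM sleep.
 */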

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */
	if (irq->active) {
		irq->active = false;

		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending);

		irq->active = true;
	} else {
		irq->pending = true;
	}
}
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
{
	struct irq_reg *reg = container_of(ev, struct irq_reg, event);

	/* do nothing if suspended - just to cause a wakeup */
	if (irqs_suspended)
		return;

	generic_handle_irq(reg->irq);
}

static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->timetravel_handler)
		return false;

	/*
	 * Handle all messages - we might get multiple even while
	 * interrupts are already suspended, due to suspend order
	 * etc. Note that time_travel_add_irq_event() will not add
	 * an event twice, if it's pending already "first wins".
	 */
	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

	if (!reg->event.pending)
		return false;

	if (irqs_suspended)
		reg->pending_on_resume = true;
	return true;
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	return false;
}
#endif

static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
			      struct uml_pt_regs *regs,
			      bool timetravel_handlers_only)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->events)
		return;

	if (os_epoll_triggered(idx, reg->events) <= 0)
		return;

	if (irq_do_timetravel_handler(entry, t))
		return;

	/*
	 * If we're called to only run time-travel handlers then don't
	 * actually proceed but mark sigio as pending (if applicable).
	 * For suspend/resume, timetravel_handlers_only may be true
	 * despite time-travel not being configured and used.
	 */
	if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
		mark_sigio_pending();
#endif
		return;
	}

	irq_io_loop(reg, regs);
}

static void _sigio_handler(struct uml_pt_regs *regs,
			   bool timetravel_handlers_only)
{
	struct irq_entry *irq_entry;
	int n, i;

	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
		return;

	while (1) {
		/* This is now lockless - epoll keeps back-references to the irqs
		 * which have triggered it so there is no need to walk the irq
		 * list and lock it every time. We avoid locking by turning off
		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
		 * we do any changes to the actual data structures
		 */
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n ; i++) {
			enum um_irq_type t;

			irq_entry = os_epoll_get_data_pointer(i);

			for (t = 0; t < NUM_IRQ_TYPES; t++)
				sigio_reg_handler(i, irq_entry, t, regs,
						  timetravel_handlers_only);
		}
	}

	if (!timetravel_handlers_only)
		free_irqs();
}

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	_sigio_handler(regs, irqs_suspended);
}
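
/*
 * Find the irq_entry registered for a given fd, or NULL if none is
 * registered.  Callers must hold irq_lock.
 */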
static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk;

	lockdep_assert_held(&irq_lock);

	list_for_each_entry(walk, &active_fds, list) {
		if (walk->fd == fd)
			return walk;
	}

	return NULL;
}

static void free_irq_entry(struct irq_entry *to_free, bool remove)
{
	if (!to_free)
		return;

	if (remove)
		os_del_epoll_fd(to_free->fd);
	list_del(&to_free->list);
	kfree(to_free);
}

static bool update_irq_entry(struct irq_entry *entry)
{
	enum um_irq_type i;
	int events = 0;

	for (i = 0; i < NUM_IRQ_TYPES; i++)
		events |= entry->reg[i].events;

	if (events) {
		/* will modify (instead of add) if needed */
		os_add_epoll_fd(events, entry->fd, entry);
		return true;
	}

	os_del_epoll_fd(entry->fd);
	return false;
}

static void update_or_free_irq_entry(struct irq_entry *entry)
{
	if (!update_irq_entry(entry))
		free_irq_entry(entry, false);
}
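
/*
 * Register an fd/IRQ-type pair; this is the common path used by
 * _um_request_irq() below.  It allocates (or reuses) the irq_entry for
 * the fd, fills in the reg[] slot for the given type and re-arms the
 * fd in the epoll set.
 */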
static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
		       void (*timetravel_handler)(int, int, void *,
						  struct time_travel_event *))
{
	struct irq_entry *irq_entry;
	int err, events = os_event_mask(type);
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);
	irq_entry = get_irq_entry_by_fd(fd);
	if (irq_entry) {
		/* cannot register the same FD twice with the same type */
		if (WARN_ON(irq_entry->reg[type].events)) {
			err = -EALREADY;
			goto out_unlock;
		}

		/* temporarily disable to avoid IRQ-side locking */
		os_del_epoll_fd(fd);
	} else {
		irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
		if (!irq_entry) {
			err = -ENOMEM;
			goto out_unlock;
		}
		irq_entry->fd = fd;
		list_add_tail(&irq_entry->list, &active_fds);
		maybe_sigio_broken(fd);
	}

	irq_entry->reg[type].id = dev_id;
	irq_entry->reg[type].irq = irq;
	irq_entry->reg[type].active = true;
	irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	if (um_irq_timetravel_handler_used()) {
		irq_entry->reg[type].timetravel_handler = timetravel_handler;
		irq_entry->reg[type].event.fn = irq_event_handler;
	}
#endif

	WARN_ON(!update_irq_entry(irq_entry));
	spin_unlock_irqrestore(&irq_lock, flags);

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}

/*
 * Remove the entry or entries for a specific FD.  If you don't want to
 * remove all the possible entries, use um_free_irq() or
 * deactivate_fd() instead.
 */
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	free_irq_entry(to_free, true);
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type i;

		for (i = 0; i < NUM_IRQ_TYPES; i++) {
			struct irq_reg *reg = &entry->reg[i];

			if (!reg->events)
				continue;
			if (reg->irq != irq)
				continue;
			if (reg->id != dev)
				continue;

			os_del_epoll_fd(entry->fd);
			reg->events = 0;
			update_or_free_irq_entry(entry);
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(&irq_lock, flags);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *entry;
	unsigned long flags;
	int i;

	os_del_epoll_fd(fd);

	spin_lock_irqsave(&irq_lock, flags);
	entry = get_irq_entry_by_fd(fd);
	if (!entry)
		goto out;

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		if (!entry->reg[i].events)
			continue;
		if (entry->reg[i].irq == irqnum)
			entry->reg[i].events = 0;
	}

	update_or_free_irq_entry(entry);
out:
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_entry *entry;

	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */
	os_set_ioignore();

	/* we can no longer call kfree() here so just deactivate */
	list_for_each_entry(entry, &active_fds, list)
		os_del_epoll_fd(entry->fd);

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(int irq, void *dev)
{
	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
		 "freeing invalid irq %d", irq))
		return;

	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
	clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id,
		void (*timetravel_handler)(int, int, void *,
					   struct time_travel_event *))
{
	int err;

	if (irq == UM_IRQ_ALLOC) {
		int i;

		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
			if (!test_and_set_bit(i, irqs_allocated)) {
				irq = i;
				break;
			}
		}
	}

	if (irq < 0)
		return -ENOSPC;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
		if (err)
			goto error;
	}

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err < 0)
		goto error;

	return irq;

error:
	clear_bit(irq, irqs_allocated);
	return err;
}

int um_request_irq(int irq, int fd, enum um_irq_type type,
		   irq_handler_t handler, unsigned long irqflags,
		   const char *devname, void *dev_id)
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);
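
/*
 * Example usage (hypothetical driver code, for illustration only;
 * my_handler/my_dev are not part of this file):
 *
 *	int irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ,
 *				 my_handler, 0, "my-device", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	um_free_irq(irq, my_dev);
 *
 * On success the (possibly newly allocated) IRQ number is returned and
 * my_handler runs from the SIGIO path via do_IRQ() whenever epoll
 * reports the fd as readable.
 */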

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
		      irq_handler_t handler, unsigned long irqflags,
		      const char *devname, void *dev_id,
		      void (*timetravel_handler)(int, int, void *,
						 struct time_travel_event *))
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);
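
/*
 * Run only the time-travel aware handlers for any pending SIGIO
 * events.  No register state is available here (regs == NULL); the
 * regular IRQ handlers are skipped, see the timetravel_handlers_only
 * checks above.
 */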
void sigio_run_timetravel_handlers(void)
{
	_sigio_handler(NULL, true);
}
#endif

#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	irqs_suspended = true;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;
		bool clear = true;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			/*
			 * For the SIGIO_WRITE_IRQ, which is used to handle the
			 * SIGIO workaround thread, we need special handling:
			 * enable wake for it itself, but below we tell it about
			 * any FDs that should be suspended.
			 */
			if (entry->reg[t].wakeup ||
			    entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
			    || entry->reg[t].timetravel_handler
#endif
			    ) {
				clear = false;
				break;
			}
		}

		if (clear) {
			entry->suspended = true;
			os_clear_fd_async(entry->fd);
			entry->sigio_workaround =
				!__ignore_sigio_fd(entry->fd);
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);
}
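
/*
 * Undo um_irqs_suspend(): first deliver any IRQs whose time-travel
 * events fired while we were suspended, then re-enable SIGIO and the
 * SIGIO workaround for the fds that were quiesced above.
 */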
void um_irqs_resume(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	local_irq_save(flags);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	/*
	 * We don't need to lock anything here since we're in resume
	 * and nothing else is running, but have disabled IRQs so we
	 * don't try anything else with the interrupt list from there.
	 */
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			struct irq_reg *reg = &entry->reg[t];

			if (reg->pending_on_resume) {
				irq_enter();
				generic_handle_irq(reg->irq);
				irq_exit();
				reg->pending_on_resume = false;
			}
		}
	}
#endif

	spin_lock(&irq_lock);
	list_for_each_entry(entry, &active_fds, list) {
		if (entry->suspended) {
			int err = os_set_fd_async(entry->fd);

			WARN(err < 0, "os_set_fd_async returned %d\n", err);
			entry->suspended = false;

			if (entry->sigio_workaround) {
				err = __add_sigio_fd(entry->fd);
				WARN(err < 0, "add_sigio_returned %d\n", err);
			}
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);
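
	/*
	 * Re-enable IRQ processing and send ourselves a SIGIO so that
	 * anything that arrived on the re-enabled fds while we were
	 * suspended is picked up now.
	 */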
	irqs_suspended = false;
	send_sigio_to_self();
}

static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			if (entry->reg[t].irq != d->irq)
				continue;
			entry->reg[t].wakeup = on;
			goto unlock;
		}
	}
unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
	return 0;
}
#else
#define normal_irq_set_wake NULL
#endif

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
	.irq_set_wake = normal_irq_set_wake,
};

static struct irq_chip alarm_irq_type = {
	.name = "SIGALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation. We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack. The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to look reasonable.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other? UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled. If the value is
 * non-zero, then there is stack setup in progress. In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one. As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit). This is the
 * nesting indicator. If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
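
/*
 * Bitmask of signals that arrived while an outer handler was still
 * setting up the IRQ stack; bits are accumulated and consumed with
 * xchg() in to_irq_stack()/from_irq_stack() as described above.
 */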
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in. So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);