 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>

extern void free_irqs(void);
/* When epoll triggers we do not know why it did so;
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_fd array for each fd -
 * one entry per IRQ type.
 */

struct irq_entry {
	struct irq_entry *next;
	int fd;
	struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
};

static struct irq_entry *active_fds;

static DEFINE_SPINLOCK(irq_lock);
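/* For reference, a sketch of the per-IRQ "leaf" as this file uses it. The
 * authoritative definition lives in the shared UML headers, so the exact
 * field order and types below are an assumption based purely on how the
 * fields are touched in this file:
 *
 *	struct irq_fd {
 *		void *id;	// dev_id cookie passed in by the driver
 *		int irq;	// Linux IRQ number handed to do_IRQ()
 *		int fd;		// host file descriptor
 *		int events;	// epoll event mask from os_event_mask()
 *		bool active;	// guards against handler reentry
 *		bool pending;	// a trigger arrived while the handler ran
 *		bool purge;	// flagged for disposal, reaped in sigio_handler()
 *	};
 */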
static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */
			do_IRQ(irq->irq, regs);
		} while (irq->pending && (!irq->purge));
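/* A minimal sketch of the dispatch discipline the comment above describes
 * (the function body is abridged here, so treat the surrounding control flow
 * as an assumption rather than a verbatim copy):
 *
 *	if (irq->active) {
 *		irq->active = false;
 *		do {
 *			irq->pending = false;
 *			do_IRQ(irq->irq, regs);
 *		} while (irq->pending && (!irq->purge));
 *		if (!irq->purge)
 *			irq->active = true;
 *	} else {
 *		irq->pending = true;
 *	}
 */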
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
	struct irq_entry *irq_entry;
		/* This is now lockless - epoll keeps back-references to the irqs
		 * which have triggered it, so there is no need to walk the irq
		 * list and lock it every time. We avoid locking by turning off
		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
		 * we do any changes to the actual data structures
		 */
		n = os_waiting_for_events_epoll();
		for (i = 0; i < n ; i++) {
			/* Epoll back reference is the entry with 3 irq_fd
			 * leaves - one for each irq type.
			 */
			irq_entry = (struct irq_entry *)
				os_epoll_get_data_pointer(i);
			for (j = 0; j < MAX_IRQ_TYPE ; j++) {
				irq = irq_entry->irq_array[j];
				if (os_epoll_triggered(i, irq->events) > 0)
					irq_io_loop(irq, regs);
				irq_entry->irq_array[j] = NULL;
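/* The slot is cleared here, in the event loop itself, for leaves that
 * do_free_by_irq_and_dev() flagged with ->purge: deferring the actual
 * disposal to this point keeps it from racing with a trigger that is still
 * being delivered. (The exact spot where the leaf is finally freed is
 * abridged above, so that detail is an assumption.)
 */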
static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
		irq = irq_entry->irq_array[i];
			events = irq->events | events;
	/* os_add_epoll will call os_mod_epoll if this already exists */
		return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
	/* No events - delete */
	return os_del_epoll_fd(irq_entry->fd);
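/* For context: os_event_mask() in the host-side (os-Linux) layer is expected
 * to translate an IRQ type into an epoll event mask - roughly EPOLLIN for
 * IRQ_READ and EPOLLOUT for IRQ_WRITE. OR-ing the per-type masks above folds
 * the whole irq_entry into a single epoll registration for the fd. (The exact
 * mapping is an assumption; only the helper names appear in this file.)
 */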
static int activate_fd(int irq, int fd, int type, void *dev_id)
	struct irq_fd *new_fd;
	struct irq_entry *irq_entry;
	err = os_set_fd_async(fd);
	spin_lock_irqsave(&irq_lock, flags);
	/* Check if we have an entry for this fd */
	for (irq_entry = active_fds;
	     irq_entry != NULL; irq_entry = irq_entry->next) {
		if (irq_entry->fd == fd)
	if (irq_entry == NULL) {
		/* This needs to be atomic as it may be called from an
		irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
		if (irq_entry == NULL) {
				"Failed to allocate new IRQ entry\n");
		for (i = 0; i < MAX_IRQ_TYPE; i++)
			irq_entry->irq_array[i] = NULL;
		irq_entry->next = active_fds;
		active_fds = irq_entry;
	/* Check if we are trying to re-register an interrupt for a
	if (irq_entry->irq_array[type] != NULL) {
			"Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
			irq, fd, type, dev_id
	/* New entry for this fd */
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
	events = os_event_mask(type);
	*new_fd = ((struct irq_fd) {
	/* Turn off any IO on this fd - allows us to
	 * avoid locking the IRQ loop
	 */
	os_del_epoll_fd(irq_entry->fd);
	irq_entry->irq_array[type] = new_fd;
	/* Turn back IO on with the correct (new) IO event mask */
	assign_epoll_events_to_irq(irq_entry);
	spin_unlock_irqrestore(&irq_lock, flags);
	maybe_sigio_broken(fd, (type != IRQ_NONE));
	spin_unlock_irqrestore(&irq_lock, flags);
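/* maybe_sigio_broken() lives in the host-side layer; as the name suggests it
 * deals with descriptor types for which the host cannot deliver SIGIO
 * directly. What exactly it arranges in that case is outside this file, so
 * the description here is an assumption.
 */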
/*
 * Walk the IRQ list and dispose of any unused entries.
 * Should be done under irq_lock.
 */

static void garbage_collect_irq_entries(void)
	struct irq_entry *walk;
	struct irq_entry *previous = NULL;
	struct irq_entry *to_free;
	if (active_fds == NULL)
	while (walk != NULL) {
		for (i = 0; i < MAX_IRQ_TYPE ; i++) {
			if (walk->irq_array[i] != NULL) {
			if (previous == NULL)
				active_fds = walk->next;
				previous->next = walk->next;

/*
 * Walk the IRQ list and get the descriptor for our FD
 */

static struct irq_entry *get_irq_entry_by_fd(int fd)
	struct irq_entry *walk = active_fds;
	while (walk != NULL) {
/*
 * Walk the IRQ list and dispose of an entry for a specific
 * device, fd and number. Note - if sharing an IRQ for read
 * and write for the same FD it will be disposed of in either case.
 * If this behaviour is undesirable use different IRQ ids.
 */

#define IGNORE_IRQ (1<<0)
#define IGNORE_DEV (1<<1)
static void do_free_by_irq_and_dev(
	struct irq_entry *irq_entry,
	struct irq_fd *to_free;
	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
		if (irq_entry->irq_array[i] != NULL) {
			((flags & IGNORE_IRQ) ||
				(irq_entry->irq_array[i]->irq == irq)) &&
			((flags & IGNORE_DEV) ||
				(irq_entry->irq_array[i]->id == dev))
				/* Turn off any IO on this fd - allows us to
				 * avoid locking the IRQ loop
				 */
				os_del_epoll_fd(irq_entry->fd);
				to_free = irq_entry->irq_array[i];
				irq_entry->irq_array[i] = NULL;
				assign_epoll_events_to_irq(irq_entry);
					to_free->purge = true;
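/* Note on the flags: IGNORE_IRQ and IGNORE_DEV each disable one of the two
 * match criteria, so a caller can free by (irq, dev), by irq only, by dev
 * only, or unconditionally. free_irq_by_fd() below passes both flags, which
 * is why it sweeps every leaf hanging off the fd regardless of IRQ number or
 * device cookie.
 */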
void free_irq_by_fd(int fd)
	struct irq_entry *to_free;
	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			IGNORE_IRQ | IGNORE_DEV
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
EXPORT_SYMBOL(free_irq_by_fd);
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
	struct irq_entry *to_free;
	spin_lock_irqsave(&irq_lock, flags);
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
		to_free = to_free->next;
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
void reactivate_fd(int fd, int irqnum)
	/** NOP - we do auto-EOI now **/

void deactivate_fd(int fd, int irqnum)
	struct irq_entry *to_free;
	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
EXPORT_SYMBOL(deactivate_fd);
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting. No locking because
 * that would cause a pointless shutdown hang if something hadn't

int deactivate_all_fds(void)
	struct irq_entry *to_free;
	spin_lock_irqsave(&irq_lock, flags);
	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			IGNORE_IRQ | IGNORE_DEV
		to_free = to_free->next;
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific

unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	generic_handle_irq(irq);
	set_irq_regs(old_regs);

void um_free_irq(unsigned int irq, void *dev)
	free_irq_by_irq_and_dev(irq, dev);
EXPORT_SYMBOL(um_free_irq);
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char * devname,
	err = activate_fd(irq, fd, type, dev_id);
	return request_irq(irq, handler, irqflags, devname, dev_id);
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
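/* Typical driver-side usage, for illustration only (the IRQ number, handler
 * and device cookie below are hypothetical, not part of this file):
 *
 *	static irqreturn_t mydev_intr(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int err = um_request_irq(MYDEV_IRQ, fd, IRQ_READ, mydev_intr,
 *				 IRQF_SHARED, "mydev", dev);
 *
 * activate_fd() wires the host fd into the epoll loop before request_irq()
 * registers the handler with the generic IRQ layer.
 */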
/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.irq_disable = dummy,

static struct irq_chip SIGVTALRM_irq_type = {
	.irq_disable = dummy,
void __init init_IRQ(void)
	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation. We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack. The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * What happens when two signals race each other? UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 *         handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 *         can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 *         thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled. If the value is
 * non-zero, then there is stack setup in progress. In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one. As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit). This is the
 * nesting indicator. If it is non-NULL, then the stack is already
 * set up and the handler can run.
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
	struct thread_info *ti;
	unsigned long mask, old;
	mask = xchg(&pending_mask, *mask_out);
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in. So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
			mask = xchg(&pending_mask, old);
		} while (mask != old);
	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
		struct task_struct *task;
		struct thread_info *tti;
		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);
		ti->real_thread = tti;
	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
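/*
 * from_irq_stack is the inverse of to_irq_stack: the (possibly updated)
 * thread_info is copied back onto the task's own stack, the real_thread
 * nesting marker is cleared, and whatever signals accumulated in
 * pending_mask while we were on the IRQ stack are handed back to the caller.
 */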
unsigned long from_irq_stack(int nested)
	struct thread_info *ti, *to;
	ti = current_thread_info();
	to = ti->real_thread;
	ti->real_thread = NULL;
	mask = xchg(&pending_mask, 0);