arch/um/kernel/irq.c (GNU Linux-libre 6.7.9-gnu)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>

/*
 * When epoll triggers we do not know why it did so; we can also have
 * different IRQs for read and write.  This is why we keep a small
 * irq_reg array for each fd - one entry per IRQ type.
 */
struct irq_reg {
        void *id;
        int irq;
        /* it's cheaper to store this than to query it */
        int events;
        bool active;
        bool pending;
        bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
        bool pending_on_resume;
        void (*timetravel_handler)(int, int, void *,
                                   struct time_travel_event *);
        struct time_travel_event event;
#endif
};

struct irq_entry {
        struct list_head list;
        int fd;
        struct irq_reg reg[NUM_IRQ_TYPES];
        bool suspended;
        bool sigio_workaround;
};

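/* irq_lock protects active_fds and the irq_reg slots inside each entry */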
static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
        /*
         * irq->active guards against reentry; irq->pending accumulates
         * pending requests, and if pending is raised the irq handler is
         * re-run until pending is cleared.  For example, if a new SIGIO
         * for this IRQ arrives while do_IRQ() is still running, the
         * nested call finds active == false, merely sets pending, and
         * the loop below runs do_IRQ() once more.
         */
        if (irq->active) {
                irq->active = false;

                do {
                        irq->pending = false;
                        do_IRQ(irq->irq, regs);
                } while (irq->pending);

                irq->active = true;
        } else {
                irq->pending = true;
        }
}

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
{
        struct irq_reg *reg = container_of(ev, struct irq_reg, event);

        /* do nothing if suspended - just to cause a wakeup */
        if (irqs_suspended)
                return;

        generic_handle_irq(reg->irq);
}

static bool irq_do_timetravel_handler(struct irq_entry *entry,
                                      enum um_irq_type t)
{
        struct irq_reg *reg = &entry->reg[t];

        if (!reg->timetravel_handler)
                return false;

        /*
         * Handle all messages - we might get multiple even while
         * interrupts are already suspended, due to suspend order
         * etc.  Note that time_travel_add_irq_event() will not add
         * an event twice; if one is already pending, "first wins".
         */
        reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

        if (!reg->event.pending)
                return false;

        if (irqs_suspended)
                reg->pending_on_resume = true;
        return true;
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
                                      enum um_irq_type t)
{
        return false;
}
#endif

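/*
 * Run one (fd, IRQ type) registration: bail out if nothing is
 * registered or epoll did not flag this fd for these events, give a
 * time-travel handler (if any) the first chance to claim the event,
 * and otherwise hand the interrupt to irq_io_loop().
 */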
static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
                              struct uml_pt_regs *regs,
                              bool timetravel_handlers_only)
{
        struct irq_reg *reg = &entry->reg[t];

        if (!reg->events)
                return;

        if (os_epoll_triggered(idx, reg->events) <= 0)
                return;

        if (irq_do_timetravel_handler(entry, t))
                return;

        /*
         * If we're called to only run time-travel handlers then don't
         * actually proceed but mark sigio as pending (if applicable).
         * For suspend/resume, timetravel_handlers_only may be true
         * despite time-travel not being configured and used.
         */
        if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
                mark_sigio_pending();
#endif
                return;
        }

        irq_io_loop(reg, regs);
}

static void _sigio_handler(struct uml_pt_regs *regs,
                           bool timetravel_handlers_only)
{
        struct irq_entry *irq_entry;
        int n, i;

        if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
                return;

        while (1) {
                /*
                 * This is now lockless - epoll keeps back-references to
                 * the irqs which have triggered it, so there is no need
                 * to walk the irq list and lock it every time.  We avoid
                 * locking by turning off IO for a specific fd (via
                 * os_del_epoll_fd(fd)) before changing the actual data
                 * structures.
                 */
                n = os_waiting_for_events_epoll();

                if (n <= 0) {
                        if (n == -EINTR)
                                continue;
                        else
                                break;
                }

                for (i = 0; i < n; i++) {
                        enum um_irq_type t;

                        irq_entry = os_epoll_get_data_pointer(i);

                        for (t = 0; t < NUM_IRQ_TYPES; t++)
                                sigio_reg_handler(i, irq_entry, t, regs,
                                                  timetravel_handlers_only);
                }
        }

        if (!timetravel_handlers_only)
                free_irqs();
}

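/*
 * Entry point for SIGIO.  While irqs_suspended is set, only the
 * time-travel handlers are run (see _sigio_handler() above).
 */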
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
        _sigio_handler(regs, irqs_suspended);
}

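/* Look up the registration for an fd; the caller must hold irq_lock. */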
static struct irq_entry *get_irq_entry_by_fd(int fd)
{
        struct irq_entry *walk;

        lockdep_assert_held(&irq_lock);

        list_for_each_entry(walk, &active_fds, list) {
                if (walk->fd == fd)
                        return walk;
        }

        return NULL;
}

static void free_irq_entry(struct irq_entry *to_free, bool remove)
{
        if (!to_free)
                return;

        if (remove)
                os_del_epoll_fd(to_free->fd);
        list_del(&to_free->list);
        kfree(to_free);
}

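/*
 * Re-register the fd with epoll using the union of the event masks of
 * all IRQ types.  Returns false if no events remain registered, in
 * which case the fd was removed from epoll and the caller may free
 * the entry.
 */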
static bool update_irq_entry(struct irq_entry *entry)
{
        enum um_irq_type i;
        int events = 0;

        for (i = 0; i < NUM_IRQ_TYPES; i++)
                events |= entry->reg[i].events;

        if (events) {
                /* will modify (instead of add) if needed */
                os_add_epoll_fd(events, entry->fd, entry);
                return true;
        }

        os_del_epoll_fd(entry->fd);
        return false;
}

static void update_or_free_irq_entry(struct irq_entry *entry)
{
        if (!update_irq_entry(entry))
                free_irq_entry(entry, false);
}

static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
                       void (*timetravel_handler)(int, int, void *,
                                                  struct time_travel_event *))
{
        struct irq_entry *irq_entry;
        int err, events = os_event_mask(type);
        unsigned long flags;

        err = os_set_fd_async(fd);
        if (err < 0)
                goto out;

        spin_lock_irqsave(&irq_lock, flags);
        irq_entry = get_irq_entry_by_fd(fd);
        if (irq_entry) {
                /* cannot register the same FD twice with the same type */
                if (WARN_ON(irq_entry->reg[type].events)) {
                        err = -EALREADY;
                        goto out_unlock;
                }

                /* temporarily disable to avoid IRQ-side locking */
                os_del_epoll_fd(fd);
        } else {
                irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
                if (!irq_entry) {
                        err = -ENOMEM;
                        goto out_unlock;
                }
                irq_entry->fd = fd;
                list_add_tail(&irq_entry->list, &active_fds);
                maybe_sigio_broken(fd);
        }

        irq_entry->reg[type].id = dev_id;
        irq_entry->reg[type].irq = irq;
        irq_entry->reg[type].active = true;
        irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
        if (um_irq_timetravel_handler_used()) {
                irq_entry->reg[type].timetravel_handler = timetravel_handler;
                irq_entry->reg[type].event.fn = irq_event_handler;
        }
#endif

        WARN_ON(!update_irq_entry(irq_entry));
        spin_unlock_irqrestore(&irq_lock, flags);

        return 0;
out_unlock:
        spin_unlock_irqrestore(&irq_lock, flags);
out:
        return err;
}

/*
 * Remove the entry or entries for a specific FD.  If you don't want
 * to remove all the possible entries then use um_free_irq() or
 * deactivate_fd() instead.
 */
void free_irq_by_fd(int fd)
{
        struct irq_entry *to_free;
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);
        to_free = get_irq_entry_by_fd(fd);
        free_irq_entry(to_free, true);
        spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
        struct irq_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);
        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type i;

                for (i = 0; i < NUM_IRQ_TYPES; i++) {
                        struct irq_reg *reg = &entry->reg[i];

                        if (!reg->events)
                                continue;
                        if (reg->irq != irq)
                                continue;
                        if (reg->id != dev)
                                continue;

                        os_del_epoll_fd(entry->fd);
                        reg->events = 0;
                        update_or_free_irq_entry(entry);
                        goto out;
                }
        }
out:
        spin_unlock_irqrestore(&irq_lock, flags);
}

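/*
 * Disable the registration(s) matching irqnum on this fd: the fd is
 * removed from epoll first so that the lockless IRQ loop cannot race
 * with the update, then the matching event masks are cleared and the
 * SIGIO layer is told to ignore the fd.
 */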
void deactivate_fd(int fd, int irqnum)
{
        struct irq_entry *entry;
        unsigned long flags;
        enum um_irq_type i;

        os_del_epoll_fd(fd);

        spin_lock_irqsave(&irq_lock, flags);
        entry = get_irq_entry_by_fd(fd);
        if (!entry)
                goto out;

        for (i = 0; i < NUM_IRQ_TYPES; i++) {
                if (!entry->reg[i].events)
                        continue;
                if (entry->reg[i].irq == irqnum)
                        entry->reg[i].events = 0;
        }

        update_or_free_irq_entry(entry);
out:
        spin_unlock_irqrestore(&irq_lock, flags);

        ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
        struct irq_entry *entry;

        /* Stop IO. The IRQ loop has no lock so this is our
         * only way of making sure we are safe to dispose
         * of all IRQ handlers
         */
        os_set_ioignore();

        /* we can no longer call kfree() here so just deactivate */
        list_for_each_entry(entry, &active_fds, list)
                os_del_epoll_fd(entry->fd);
        os_close_epoll_fd();
        return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
        irq_enter();
        generic_handle_irq(irq);
        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

void um_free_irq(int irq, void *dev)
{
        if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
                 "freeing invalid irq %d", irq))
                return;

        free_irq_by_irq_and_dev(irq, dev);
        free_irq(irq, dev);
        clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
                irq_handler_t handler, unsigned long irqflags,
                const char *devname, void *dev_id,
                void (*timetravel_handler)(int, int, void *,
                                           struct time_travel_event *))
{
        int err;

        if (irq == UM_IRQ_ALLOC) {
                int i;

                for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
                        if (!test_and_set_bit(i, irqs_allocated)) {
                                irq = i;
                                break;
                        }
                }
        }

        if (irq < 0)
                return -ENOSPC;

        if (fd != -1) {
                err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
                if (err)
                        goto error;
        }

        err = request_irq(irq, handler, irqflags, devname, dev_id);
        if (err < 0)
                goto error;

        return irq;
error:
        clear_bit(irq, irqs_allocated);
        return err;
}

int um_request_irq(int irq, int fd, enum um_irq_type type,
                   irq_handler_t handler, unsigned long irqflags,
                   const char *devname, void *dev_id)
{
        return _um_request_irq(irq, fd, type, handler, irqflags,
                               devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);
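
/*
 * Illustrative sketch only, not part of the original file: a driver
 * would typically pair um_request_irq() with um_free_irq() roughly as
 * below; my_fd, my_handler and my_dev are hypothetical stand-ins for
 * the driver's own names.
 *
 *      irq = um_request_irq(UM_IRQ_ALLOC, my_fd, IRQ_READ,
 *                           my_handler, 0, "my-driver", my_dev);
 *      if (irq < 0)
 *              return irq;
 *      ...
 *      um_free_irq(irq, my_dev);
 *
 * UM_IRQ_ALLOC asks _um_request_irq() to pick a free dynamic IRQ
 * number, which is returned on success.
 */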

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
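/*
 * Like um_request_irq(), but additionally registers a
 * timetravel_handler that is called with the IRQ number, fd, dev_id
 * and a time_travel_event when the fd triggers, so the event can be
 * queued (cf. time_travel_add_irq_event()) instead of the interrupt
 * being handled immediately.
 */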
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
                      irq_handler_t handler, unsigned long irqflags,
                      const char *devname, void *dev_id,
                      void (*timetravel_handler)(int, int, void *,
                                                 struct time_travel_event *))
{
        return _um_request_irq(irq, fd, type, handler, irqflags,
                               devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);

void sigio_run_timetravel_handlers(void)
{
        _sigio_handler(NULL, true);
}
#endif

#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
{
        struct irq_entry *entry;
        unsigned long flags;

        irqs_suspended = true;

        spin_lock_irqsave(&irq_lock, flags);
        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type t;
                bool clear = true;

                for (t = 0; t < NUM_IRQ_TYPES; t++) {
                        if (!entry->reg[t].events)
                                continue;

                        /*
                         * For the SIGIO_WRITE_IRQ, which is used to handle the
                         * SIGIO workaround thread, we need special handling:
                         * enable wake for it itself, but below we tell it about
                         * any FDs that should be suspended.
                         */
                        if (entry->reg[t].wakeup ||
                            entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
                            || entry->reg[t].timetravel_handler
#endif
                            ) {
                                clear = false;
                                break;
                        }
                }

                if (clear) {
                        entry->suspended = true;
                        os_clear_fd_async(entry->fd);
                        entry->sigio_workaround =
                                !__ignore_sigio_fd(entry->fd);
                }
        }
        spin_unlock_irqrestore(&irq_lock, flags);
}

void um_irqs_resume(void)
{
        struct irq_entry *entry;
        unsigned long flags;

        local_irq_save(flags);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
        /*
         * We don't need to lock anything here since we're in resume
         * and nothing else is running, but we have IRQs disabled so
         * that interrupts cannot touch the list while we walk it.
         */
        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type t;

                for (t = 0; t < NUM_IRQ_TYPES; t++) {
                        struct irq_reg *reg = &entry->reg[t];

                        if (reg->pending_on_resume) {
                                irq_enter();
                                generic_handle_irq(reg->irq);
                                irq_exit();
                                reg->pending_on_resume = false;
                        }
                }
        }
#endif

        spin_lock(&irq_lock);
        list_for_each_entry(entry, &active_fds, list) {
                if (entry->suspended) {
                        int err = os_set_fd_async(entry->fd);

                        WARN(err < 0, "os_set_fd_async returned %d\n", err);
                        entry->suspended = false;

                        if (entry->sigio_workaround) {
                                err = __add_sigio_fd(entry->fd);
                                WARN(err < 0, "__add_sigio_fd returned %d\n", err);
                        }
                }
        }
        spin_unlock_irqrestore(&irq_lock, flags);

        irqs_suspended = false;
        send_sigio_to_self();
}

static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
        struct irq_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);
        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type t;

                for (t = 0; t < NUM_IRQ_TYPES; t++) {
                        if (!entry->reg[t].events)
                                continue;

                        if (entry->reg[t].irq != d->irq)
                                continue;
                        entry->reg[t].wakeup = on;
                        goto unlock;
                }
        }
unlock:
        spin_unlock_irqrestore(&irq_lock, flags);
        return 0;
}
#else
#define normal_irq_set_wake NULL
#endif

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
        .name = "SIGIO",
        .irq_disable = dummy,
        .irq_enable = dummy,
        .irq_ack = dummy,
        .irq_mask = dummy,
        .irq_unmask = dummy,
        .irq_set_wake = normal_irq_set_wake,
};

static struct irq_chip alarm_irq_type = {
        .name = "SIGALRM",
        .irq_disable = dummy,
        .irq_enable = dummy,
        .irq_ack = dummy,
        .irq_mask = dummy,
        .irq_unmask = dummy,
};

void __init init_IRQ(void)
{
        int i;

        irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

        for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
                irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
        /* Initialize EPOLL Loop */
        os_setup_epoll();
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
        struct thread_info *ti;
        unsigned long mask, old;
        int nested;

        mask = xchg(&pending_mask, *mask_out);
        if (mask != 0) {
                /*
                 * If any interrupts come in at this point, we want to
                 * make sure that their bits aren't lost by our
                 * putting our bit in.  So, this loop accumulates bits
                 * until xchg returns the same value that we put in.
                 * When that happens, there were no new interrupts,
                 * and pending_mask contains a bit for each interrupt
                 * that came in.
                 */
                old = *mask_out;
                do {
                        old |= mask;
                        mask = xchg(&pending_mask, old);
                } while (mask != old);
                return 1;
        }

        ti = current_thread_info();
        nested = (ti->real_thread != NULL);
        if (!nested) {
                struct task_struct *task;
                struct thread_info *tti;

                task = cpu_tasks[ti->cpu].task;
                tti = task_thread_info(task);

                *ti = *tti;
                ti->real_thread = tti;
                task->stack = ti;
        }

        mask = xchg(&pending_mask, 0);
        *mask_out |= mask | nested;
        return 0;
}

unsigned long from_irq_stack(int nested)
{
        struct thread_info *ti, *to;
        unsigned long mask;

        ti = current_thread_info();

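        /*
         * Bit 0 of pending_mask marks the copy-back as in progress:
         * signals arriving now accumulate their bits in pending_mask
         * (see to_irq_stack()) instead of redoing the stack switch;
         * the return value below strips this marker bit again.
         */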
        pending_mask = 1;

        to = ti->real_thread;
        current->stack = to;
        ti->real_thread = NULL;
        *to = *ti;

        mask = xchg(&pending_mask, 0);
        return mask & ~1;
}