/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

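/*
 * Misrouted IRQ fixup level: 0 = disabled, 1 = poll other lines when an
 * interrupt goes unhandled ("irqfixup"), 2 = additionally poll for IRQ 0
 * and IRQF_IRQPOLL handlers even when handled ("irqpoll").
 */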
static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
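/*
 * Only one poller may run at a time: irq_poll_active gates entry and
 * irq_poll_cpu records the polling CPU so that irq_wait_for_poll() can
 * detect being called on the poller's own CPU.
 */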
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
                      "irq poll in progress on cpu %d for irq %d\n",
                      smp_processor_id(), desc->irq_data.irq))
                return false;

#ifdef CONFIG_SMP
        do {
                raw_spin_unlock(&desc->lock);
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();
                raw_spin_lock(&desc->lock);
        } while (irqd_irq_inprogress(&desc->irq_data));
        /* Might have been disabled in the meantime */
        return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
        return false;
#endif
}

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(struct irq_desc *desc, bool force)
{
        irqreturn_t ret = IRQ_NONE;
        struct irqaction *action;

        raw_spin_lock(&desc->lock);

        /*
         * PER_CPU, nested thread interrupts and interrupts explicitly
         * marked polled are excluded from polling.
         */
        if (irq_settings_is_per_cpu(desc) ||
            irq_settings_is_nested_thread(desc) ||
            irq_settings_is_polled(desc))
                goto out;

        /*
         * Do not poll disabled interrupts unless the spurious
         * disabled poller asks explicitly.
         */
        if (irqd_irq_disabled(&desc->irq_data) && !force)
                goto out;

        /*
         * All handlers must agree on IRQF_SHARED, so we test just the
         * first.
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
            (action->flags & __IRQF_TIMER))
                goto out;

        /* Already running on another processor */
        if (irqd_irq_inprogress(&desc->irq_data)) {
                /*
                 * Already running: If it is shared, get the other
                 * CPU to go looking for our mystery interrupt too
                 */
                desc->istate |= IRQS_PENDING;
                goto out;
        }

        /* Mark it poll in progress */
        desc->istate |= IRQS_POLL_INPROGRESS;
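        /*
         * Replay the handler as long as some other CPU has flagged the
         * line pending (IRQS_PENDING, set above) and an action is
         * still installed.
         */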
        do {
                if (handle_irq_event(desc) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
                /* Make sure that there is still a valid action */
                action = desc->action;
        } while ((desc->istate & IRQS_PENDING) && action);
        desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
        raw_spin_unlock(&desc->lock);
        return ret == IRQ_HANDLED;
}

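/*
 * Poll all other interrupt lines for a handler that claims the
 * unhandled interrupt. Returns 1 if some handler did, so the caller
 * can adjust its unhandled interrupt count.
 */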
static int misrouted_irq(int irq)
{
        struct irq_desc *desc;
        int i, ok = 0;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;

        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
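                /* Skip IRQ 0, the legacy timer interrupt */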
                if (!i)
                        continue;

                if (i == irq)   /* Already tried */
                        continue;

                if (try_one_irq(desc, false))
                        ok = 1;
        }
out:
        atomic_dec(&irq_poll_active);
        /* So the caller can adjust the irq error counts */
        return ok;
}

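/*
 * Timer callback: periodically re-poll the lines that the spurious
 * detector disabled (IRQS_SPURIOUS_DISABLED) and rearm the timer.
 */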
static void poll_spurious_irqs(unsigned long dummy)
{
        struct irq_desc *desc;
        int i;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                unsigned int state;

                if (!i)
                        continue;

                /* Racy but it doesn't matter */
                state = desc->istate;
                barrier();
                if (!(state & IRQS_SPURIOUS_DISABLED))
                        continue;

                local_irq_disable();
                try_one_irq(desc, true);
                local_irq_enable();
        }
out:
        atomic_dec(&irq_poll_active);
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

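/*
 * Valid handler returns are combinations of IRQ_NONE, IRQ_HANDLED and
 * IRQ_WAKE_THREAD, i.e. values no larger than
 * (IRQ_HANDLED | IRQ_WAKE_THREAD). Anything above that is bogus.
 */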
static inline int bad_action_ret(irqreturn_t action_ret)
{
        if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
                return 0;
        return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
        unsigned int irq = irq_desc_get_irq(desc);
        struct irqaction *action;
        unsigned long flags;

        if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared (try booting with "
                                "the \"irqpoll\" option)\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");

        /*
         * We need to take desc->lock here. note_interrupt() is called
         * w/o desc->lock held, but with the interrupt marked in
         * progress. We might race with something else removing an
         * action. It's ok to take desc->lock here. See synchronize_irq().
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_action_of_desc(desc, action) {
                printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
                if (action->thread_fn)
                        printk(KERN_CONT " threaded [<%p>] %pf",
                                        action->thread_fn, action->thread_fn);
                printk(KERN_CONT "\n");
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

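/* Rate-limit the reports to the first hundred bad events */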
static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count > 0) {
                count--;
                __report_bad_irq(desc, action_ret);
        }
}

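/*
 * Decide whether an interrupt warrants the misrouted-IRQ poll,
 * depending on the irqfixup level and the handler return value.
 */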
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
                  irqreturn_t action_ret)
{
        struct irqaction *action;

        if (!irqfixup)
                return 0;

        /* We didn't actually handle the IRQ - see if it was misrouted? */
        if (action_ret == IRQ_NONE)
                return 1;

        /*
         * But for 'irqfixup == 2' we also do it for handled interrupts if
         * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
         * traditional PC timer interrupt. Legacy.)
         */
        if (irqfixup < 2)
                return 0;

        if (!irq)
                return 1;

        /*
         * Since we don't get the descriptor lock, "action" can
         * change under us. We don't really care, but we don't
         * want to follow a NULL pointer. So tell the compiler to
         * just load it once by using a barrier.
         */
        action = desc->action;
        barrier();
        return action && (action->flags & IRQF_IRQPOLL);
}

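/*
 * Bit 31 of threads_handled_last marks the deferred spurious detection
 * as active; see the IRQ_WAKE_THREAD handling in note_interrupt().
 */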
#define SPURIOUS_DEFERRED       0x80000000

void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
        unsigned int irq;

        if (desc->istate & IRQS_POLL_INPROGRESS ||
            irq_settings_is_polled(desc))
                return;

        if (bad_action_ret(action_ret)) {
                report_bad_irq(desc, action_ret);
                return;
        }

        /*
         * We cannot call note_interrupt from the threaded handler
         * because we need to look at the compound result of all handlers
         * (primary and threaded). Aside from that, in the threaded
         * shared case we have no serialization against an incoming
         * hardware interrupt while we are dealing with a threaded
         * result.
         *
         * So in case a thread is woken, we just note the fact and
         * defer the analysis to the next hardware interrupt.
         *
         * The threaded handlers increment a count of successfully
         * handled interrupts and we check whether that count
         * changed versus the last invocation.
         *
         * We could handle all interrupts with the delayed-by-one
         * mechanism, but for the non-forced threaded case we'd just
         * add pointless overhead to the straight hardirq interrupts
         * for the sake of a few lines less code.
         */
        if (action_ret & IRQ_WAKE_THREAD) {
                /*
                 * There is a thread woken. Check whether one of the
                 * shared primary handlers returned IRQ_HANDLED. If
                 * not we defer the spurious detection to the next
                 * interrupt.
                 */
                if (action_ret == IRQ_WAKE_THREAD) {
                        int handled;
                        /*
                         * We use bit 31 of threads_handled_last to
                         * mark deferred spurious detection as
                         * active. No locking necessary as
                         * threads_handled_last is only accessed here
                         * and we have the guarantee that hard
                         * interrupts are not reentrant.
                         */
                        if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
                                desc->threads_handled_last |= SPURIOUS_DEFERRED;
                                return;
                        }
                        /*
                         * Check whether one of the threaded handlers
                         * returned IRQ_HANDLED since the last
                         * interrupt happened.
                         *
                         * For simplicity we just set bit 31, as it is
                         * set in threads_handled_last as well. So we
                         * avoid extra masking. And we really do not
                         * care about the high bits of the handled
                         * count. We just care about the count being
                         * different than the one we saw before.
                         */
                        handled = atomic_read(&desc->threads_handled);
                        handled |= SPURIOUS_DEFERRED;
                        if (handled != desc->threads_handled_last) {
                                action_ret = IRQ_HANDLED;
                                /*
                                 * Note: We keep the SPURIOUS_DEFERRED
                                 * bit set. We are handling the
                                 * previous invocation right now.
                                 * Keep it for the current one, so the
                                 * next hardware interrupt will
                                 * account for it.
                                 */
                                desc->threads_handled_last = handled;
                        } else {
                                /*
                                 * None of the threaded handlers felt
                                 * responsible for the last interrupt.
                                 *
                                 * We keep the SPURIOUS_DEFERRED bit
                                 * set in threads_handled_last as we
                                 * need to account for the current
                                 * interrupt as well.
                                 */
                                action_ret = IRQ_NONE;
                        }
                } else {
                        /*
                         * One of the primary handlers returned
                         * IRQ_HANDLED. So we don't care about the
                         * threaded handlers on the same line. Clear
                         * the deferred detection bit.
                         *
                         * In theory we could/should check whether the
                         * deferred bit is set and take the result of
                         * the previous run into account here as
                         * well. But it's really not worth the
                         * trouble. If every other interrupt is
                         * handled we never trigger the spurious
                         * detector. And if this is just the one
                         * handled interrupt out of 100k unhandled ones,
                         * then we merely delay the spurious detection
                         * by one hard interrupt. Not a real problem.
                         */
                        desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
                }
        }

        if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an
                 * error, otherwise the counter becomes a doomsday timer
                 * for otherwise working systems.
                 */
                if (time_after(jiffies, desc->last_unhandled + HZ/10))
                        desc->irqs_unhandled = 1;
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
        }

        irq = irq_desc_get_irq(desc);
        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
                int ok = misrouted_irq(irq);
                if (action_ret == IRQ_NONE)
                        desc->irqs_unhandled -= ok;
        }

        desc->irq_count++;
        if (likely(desc->irq_count < 100000))
                return;

        desc->irq_count = 0;
        if (unlikely(desc->irqs_unhandled > 99900)) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->istate |= IRQS_SPURIOUS_DISABLED;
                desc->depth++;
                irq_disable(desc);

                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
        }
        desc->irqs_unhandled = 0;
}

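/*
 * Boot-time knobs: "noirqdebug" turns the spurious IRQ detector off,
 * "irqfixup" enables misrouted IRQ recovery and "irqpoll" the more
 * aggressive polling variant (irqfixup == 2). noirqdebug and irqfixup
 * are also runtime-writable module parameters.
 */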
bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk(KERN_INFO "IRQ lockup detection disabled\n");

        return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
        irqfixup = 1;
        printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
        printk(KERN_WARNING "This may impact system performance.\n");

        return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
        irqfixup = 2;
        printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
                                "enabled\n");
        printk(KERN_WARNING "This may significantly impact system "
                                "performance\n");
        return 1;
}

__setup("irqpoll", irqpoll_setup);