GNU Linux-libre 5.4.257-gnu1: kernel/irq/irqdesc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot CPU. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
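
/*
 * Example (editor's illustration, not part of the original source):
 * booting with "irqaffinity=0-3" limits the default affinity mask of
 * newly allocated descriptors to CPUs 0-3. The boot CPU is always added
 * back, so even a mask that excludes it (e.g. "irqaffinity=7" when
 * booting on CPU 0) still ends up containing the boot CPU.
 */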

static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	int cpu, irq = desc->irq_data.irq;
	ssize_t ret = 0;
	char *p = "";

	for_each_possible_cpu(cpu) {
		unsigned int c = kstat_irqs_cpu(irq, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for (action = desc->action; action != NULL; action = action->next) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(irq);

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups = irq_groups,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial and failures in the late irq_sysfs_init()
		 * cannot be rolled back.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
		else
			desc->istate |= IRQS_SYSFS;
	}
}

static void irq_sysfs_del(struct irq_desc *desc)
{
	/*
	 * Only invoke kobject_del() when kobject_add() was successfully
	 * invoked for the descriptor. This covers both early boot, where
	 * sysfs is not initialized yet, and the case of a failed
	 * kobject_add() invocation.
	 */
	if (desc->istate & IRQS_SYSFS)
		kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);
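
/*
 * Editor's illustration (not part of the original source): the attributes
 * above surface under /sys/kernel/irq/<nr>/. The values below are
 * hypothetical; per_cpu_count is one comma-separated counter per
 * possible CPU, as produced by per_cpu_count_show():
 *
 *	$ cat /sys/kernel/irq/9/chip_name
 *	IO-APIC
 *	$ cat /sys/kernel/irq/9/per_cpu_count
 *	0,4921,0,0
 *	$ cat /sys/kernel/irq/9/actions
 *	acpi
 */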

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);
	init_waitqueue_head(&desc->wait_for_threads);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once we have deleted the descriptor from
	 * the sparse tree we can free it. Lookups from proc will then
	 * fail to find the descriptor.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	irq_sysfs_del(desc);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupt handlers to do RCU-based
	 * management of their child interrupts.
	 * This also allows us to use RCU in kstat_irqs_usr().
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct irq_affinity_desc *affinity,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0; i < cnt; i++) {
			if (cpumask_empty(&affinity[i].mask))
				return -EINVAL;
		}
	}

	for (i = 0; i < cnt; i++) {
		const struct cpumask *mask = NULL;
		unsigned int flags = 0;

		if (affinity) {
			if (affinity->is_managed) {
				flags = IRQD_AFFINITY_MANAGED |
					IRQD_MANAGED_SHUTDOWN;
			}
			mask = &affinity->mask;
			node = cpu_to_node(cpumask_first(mask));
			affinity++;
		}

		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		init_waitqueue_head(&desc[i].wait_for_threads);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct irq_affinity_desc *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
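
/*
 * Illustrative use (editor's sketch; the driver names are hypothetical):
 * a chained demultiplexing handler resolves each pending child hwirq to
 * a Linux irq number and hands it to generic_handle_irq():
 *
 *	static void foo_gpio_demux(struct irq_desc *desc)
 *	{
 *		struct foo_gpio *g = irq_desc_get_handler_data(desc);
 *		unsigned long pending = readl(g->base + FOO_PENDING);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(g->domain, bit));
 *	}
 */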

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
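
/*
 * Illustrative caller (editor's sketch; the names are hypothetical):
 * architecture entry code usually goes through the handle_domain_irq()
 * wrapper from <linux/irqdesc.h>, which passes lookup=true:
 *
 *	static void foo_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(foo_intc_base + FOO_CLAIM_REG);
 *
 *		handle_domain_irq(foo_root_domain, hwirq, regs);
 *	}
 */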

#ifdef CONFIG_IRQ_DOMAIN
/**
 * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @regs:	Register file coming from the low-level handling code
 *
 *		This function must be called from an NMI context.
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
		      struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;
	int ret = 0;

	/*
	 * NMI context needs to be set up earlier in order to deal with tracing.
	 */
	WARN_ON(!in_nmi());

	irq = irq_find_mapping(domain, hwirq);

	/*
	 * ack_bad_irq is not NMI-safe, just report
	 * an invalid interrupt.
	 */
	if (likely(irq))
		generic_handle_irq(irq);
	else
		ret = -EINVAL;

	set_irq_regs(old_regs);
	return ret;
}
#endif
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct irq_affinity_desc *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
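
/*
 * Illustrative use (editor's sketch): callers normally go through the
 * irq_alloc_descs()/irq_alloc_desc() wrappers from <linux/irq.h>, which
 * supply THIS_MODULE and a NULL affinity array:
 *
 *	int virq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;	(-EINVAL, -EEXIST or -ENOMEM)
 *	...
 *	irq_free_descs(virq, 4);
 */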

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
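
/*
 * Illustrative use (editor's sketch): this is the backend of the
 * for_each_active_irq() iterator in kernel/irq/internals.h:
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		...
 */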

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
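
/*
 * Illustrative pairing (editor's sketch): callers use the wrappers from
 * kernel/irq/internals.h, e.g.:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	...
 *	irq_put_desc_busunlock(desc, flags);
 */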

int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}
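
/*
 * Illustrative use (editor's sketch; the names are hypothetical):
 * per-CPU interrupts such as an architected timer mark the descriptor
 * first and then register a per-CPU action:
 *
 *	irq_set_percpu_devid(virq);
 *	err = request_percpu_irq(virq, foo_timer_handler, "foo_timer",
 *				 foo_timer_evt);	(a __percpu dev_id)
 */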

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

static bool irq_is_nmi(struct irq_desc *desc)
{
	return desc->istate & IRQS_NMI;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc) &&
	    !irq_is_nmi(desc))
		return desc->tot_count;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 * Contrary to kstat_irqs() this can be called from any context.
 * It uses RCU, since a concurrent removal of an interrupt descriptor
 * must observe an RCU grace period before delayed_free_desc() or
 * irq_kobj_release() frees it.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	rcu_read_lock();
	sum = kstat_irqs(irq);
	rcu_read_unlock();
	return sum;
}
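
/*
 * Illustrative caller (editor's sketch): code running outside the irq
 * subsystem's locking, such as the /proc/stat rendering path, can sum
 * the counts safely from any context:
 *
 *	unsigned int hits = kstat_irqs_usr(irq);
 */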