#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/maple_tree.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>
#include "internals.h"
/* Single lockdep class key for irq descriptor locks. */
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{ … }
__setup(…);
static void __init init_irq_default_affinity(void)
{ … }
#else
/* UP build: there is no IRQ affinity, so nothing needs initializing. */
static void __init init_irq_default_affinity(void) { }
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{ … }
static void desc_smp_init(struct irq_desc *desc, int node,
const struct cpumask *affinity)
{ … }
static void free_masks(struct irq_desc *desc)
{ … }
#else
/* !SMP stub: no cpumasks exist, so allocation trivially succeeds. */
static inline int alloc_masks(struct irq_desc *desc, int node)
{
	return 0;
}
/* !SMP stub: per-descriptor SMP state does not exist - nothing to do. */
static inline void desc_smp_init(struct irq_desc *desc, int node,
				 const struct cpumask *affinity)
{
}
/* !SMP stub: no masks were allocated, so there is nothing to free. */
static inline void free_masks(struct irq_desc *desc)
{
}
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
const struct cpumask *affinity, struct module *owner)
{ … }
int nr_irqs = …;
EXPORT_SYMBOL_GPL(…);
/* Serializes descriptor insertion/removal (see irq_mark_irq()). */
static DEFINE_MUTEX(sparse_irq_lock);
static struct maple_tree sparse_irqs = …;
static int irq_find_free_area(unsigned int from, unsigned int cnt)
{ … }
static unsigned int irq_find_at_or_after(unsigned int offset)
{ … }
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{ … }
static void delete_irq_desc(unsigned int irq)
{ … }
#ifdef CONFIG_SPARSE_IRQ
static const struct kobj_type irq_kobj_type;
#endif
static int init_desc(struct irq_desc *desc, int irq, int node,
unsigned int flags,
const struct cpumask *affinity,
struct module *owner)
{ … }
#ifdef CONFIG_SPARSE_IRQ
static void irq_kobj_release(struct kobject *kobj);
#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;
#define IRQ_ATTR_RO(_name) …
static ssize_t per_cpu_count_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
IRQ_ATTR_RO(…);
static ssize_t chip_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
IRQ_ATTR_RO(…);
static ssize_t hwirq_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
IRQ_ATTR_RO(…);
static ssize_t type_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
IRQ_ATTR_RO(…);
static ssize_t wakeup_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
IRQ_ATTR_RO(…);
static ssize_t name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
IRQ_ATTR_RO(…);
static ssize_t actions_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
IRQ_ATTR_RO(…);
static struct attribute *irq_attrs[] = …;
ATTRIBUTE_GROUPS(…);
static const struct kobj_type irq_kobj_type = …;
static void irq_sysfs_add(int irq, struct irq_desc *desc)
{ … }
static void irq_sysfs_del(struct irq_desc *desc)
{ … }
static int __init irq_sysfs_init(void)
{ … }
postcore_initcall(irq_sysfs_init);
#else
/*
 * Without CONFIG_SYSFS no attribute groups are exposed; only the
 * release callback is populated so the embedded kobject still works.
 */
static const struct kobj_type irq_kobj_type = {
	.release = irq_kobj_release,
};
/* Sysfs disabled: registering an irq kobject is a no-op. */
static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
}
/* Sysfs disabled: nothing was registered, so nothing to remove. */
static void irq_sysfs_del(struct irq_desc *desc)
{
}
#endif
struct irq_desc *irq_to_desc(unsigned int irq)
{ … }
#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
#endif
void irq_lock_sparse(void)
{ … }
void irq_unlock_sparse(void)
{ … }
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
const struct cpumask *affinity,
struct module *owner)
{ … }
static void irq_kobj_release(struct kobject *kobj)
{ … }
static void delayed_free_desc(struct rcu_head *rhp)
{ … }
static void free_desc(unsigned int irq)
{ … }
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
const struct irq_affinity_desc *affinity,
struct module *owner)
{ … }
static int irq_expand_nr_irqs(unsigned int nr)
{ … }
int __init early_irq_init(void)
{ … }
#else
/*
 * !CONFIG_SPARSE_IRQ: all descriptors live in one static, cacheline
 * aligned array.  Every entry starts disabled (depth = 1) and routes
 * to handle_bad_irq() until it is properly set up.
 */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};
/*
 * early_irq_init - set up all statically allocated irq descriptors
 *
 * Initializes every entry of irq_desc[] on the first online node and
 * then hands off to the architecture hook.  On failure, descriptors
 * initialized so far are rolled back.  Returns 0 or a negative errno.
 */
int __init early_irq_init(void)
{
	int node = first_online_node;
	int nr = ARRAY_SIZE(irq_desc);
	int i, ret;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	for (i = 0; i < nr; i++) {
		ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL);
		if (unlikely(ret))
			goto err_rollback;
	}
	return arch_early_irq_init();

err_rollback:
	/* Undo the descriptors that were successfully initialized. */
	while (--i >= 0) {
		free_masks(irq_desc + i);
		free_percpu(irq_desc[i].kstat_irqs);
	}
	return ret;
}
/*
 * irq_to_desc - map an interrupt number to its descriptor
 *
 * With the static table this is a bounds-checked array lookup;
 * out-of-range numbers yield NULL.
 */
struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq >= NR_IRQS)
		return NULL;

	return &irq_desc[irq];
}
EXPORT_SYMBOL(irq_to_desc);
/*
 * free_desc - reset a statically allocated descriptor
 * @irq:	interrupt number
 *
 * !CONFIG_SPARSE_IRQ variant: the descriptor storage is static, so
 * "freeing" means resetting it to defaults and removing it from the
 * descriptor lookup via delete_irq_desc().
 */
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	/*
	 * Reset under the descriptor lock so no concurrent user can
	 * observe a half-reset descriptor.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	delete_irq_desc(irq);
}
/*
 * !CONFIG_SPARSE_IRQ: descriptors already exist in irq_desc[], so
 * allocation just records the owning module and publishes each
 * descriptor through irq_insert_desc().  Returns @start.
 */
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct irq_affinity_desc *affinity,
			      struct module *owner)
{
	u32 idx;

	for (idx = 0; idx < cnt; idx++) {
		struct irq_desc *desc = irq_to_desc(start + idx);

		desc->owner = owner;
		irq_insert_desc(start + idx, desc);
	}
	return start;
}
/*
 * !CONFIG_SPARSE_IRQ: the descriptor array is statically sized, so the
 * interrupt number space can never be extended at runtime.
 */
static int irq_expand_nr_irqs(unsigned int total)
{
	return -ENOMEM;
}
/*
 * irq_mark_irq - mark a statically allocated interrupt as in use
 * @irq:	interrupt number
 *
 * Publishes the static descriptor for @irq via irq_insert_desc(),
 * holding sparse_irq_lock across the insertion.
 */
void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	irq_insert_desc(irq, irq_desc + irq);
	mutex_unlock(&sparse_irq_lock);
}
#ifdef CONFIG_GENERIC_IRQ_LEGACY
/*
 * irq_init_desc - reset a legacy interrupt descriptor to defaults
 * @irq:	interrupt number
 *
 * For static descriptors free_desc() resets rather than frees, so this
 * simply reinitializes the descriptor.
 */
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif
#endif
int handle_irq_desc(struct irq_desc *desc)
{ … }
int generic_handle_irq(unsigned int irq)
{ … }
EXPORT_SYMBOL_GPL(…);
int generic_handle_irq_safe(unsigned int irq)
{ … }
EXPORT_SYMBOL_GPL(…);
#ifdef CONFIG_IRQ_DOMAIN
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{ … }
EXPORT_SYMBOL_GPL(…);
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
{ … }
EXPORT_SYMBOL_GPL(…);
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
{ … }
#endif
void irq_free_descs(unsigned int from, unsigned int cnt)
{ … }
EXPORT_SYMBOL_GPL(…);
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
struct module *owner, const struct irq_affinity_desc *affinity)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned int irq_get_next_irq(unsigned int offset)
{ … }
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
unsigned int check)
{ … }
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
__releases(&desc->lock)
{ … }
int irq_set_percpu_devid_partition(unsigned int irq,
const struct cpumask *affinity)
{ … }
int irq_set_percpu_devid(unsigned int irq)
{ … }
int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{ … }
EXPORT_SYMBOL_GPL(…);
void kstat_incr_irq_this_cpu(unsigned int irq)
{ … }
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{ … }
unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask)
{ … }
static unsigned int kstat_irqs(unsigned int irq)
{ … }
#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
void kstat_snapshot_irqs(void)
{ … }
unsigned int kstat_get_irq_since_snapshot(unsigned int irq)
{ … }
#endif
unsigned int kstat_irqs_usr(unsigned int irq)
{ … }
#ifdef CONFIG_LOCKDEP
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
struct lock_class_key *request_class)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif