--- zzzz-none-000/linux-3.10.107/kernel/irq/irqdesc.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/kernel/irq/irqdesc.c	2021-02-04 17:41:59.000000000 +0000
@@ -14,14 +14,63 @@
 #include <linux/kernel_stat.h>
 #include <linux/radix-tree.h>
 #include <linux/bitmap.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#ifdef CONFIG_STOPWATCH_HARD_IRQ
+#define __STOPWATCH_USE__
+#endif
+#include <linux/stopwatch.h>
 
 #include "internals.h"
 
+DEFINE_STOPWATCH_ARRAY(hardirq, NR_IRQS + 2);
+
 /*
  * lockdep: we want to handle all irq_desc locks as a single lock-class:
  */
 static struct lock_class_key irq_desc_lock_class;
 
+#ifdef __STOPWATCH_USE__
+int stopwatch_hardirq_show(struct seq_file *f, void *v)
+{
+	struct irqaction *ap;
+	int irq = *((loff_t *) v);
+	struct irq_desc *desc = irq_to_desc(irq);
+	int cpu;
+
+	if (irq == 0)
+		seq_printf(f, "%20s\tCPU\tmin(us)\tavg(us)\tmax(us)\n\n", "");
+
+	if (irq == (NR_IRQS + 1))
+		seq_printf(f, "%-20s", "softirq");
+	else {
+		ap = desc ? desc->action : NULL;
+		if (!ap || (desc->handle_irq == handle_bad_irq))
+			return 0;
+		else
+			seq_printf(f, "%3d:%-17s", irq, ap->name);
+	}
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		if (!cpu)
+			seq_printf(f, "\t%d", cpu);
+		else
+			seq_printf(f, "%20s\t%d", "", cpu);
+		stopwatch_show(&STOPWATCH_INSTANCE_CPU(hardirq[irq], cpu), f, STOPWATCH_MICRO);
+		seq_printf(f, "\n");
+	}
+	seq_printf(f, "\n");
+	return 0;
+}
+
+static int __init hardirq_stopwatch_init(void)
+{
+	INIT_STOPWATCH_ARRAY(hardirq, NR_IRQS + 2);
+	return stopwatch_register("hardirq", NR_IRQS + 2, stopwatch_hardirq_show);
+}
+module_init(hardirq_stopwatch_init)
+#endif
+
 #if defined(CONFIG_SMP)
 static void __init init_irq_default_affinity(void)
 {
@@ -37,12 +86,13 @@
 #ifdef CONFIG_SMP
 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
 {
-	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
+				     gfp, node))
 		return -ENOMEM;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->irq_data.affinity);
+		free_cpumask_var(desc->irq_common_data.affinity);
 		return -ENOMEM;
 	}
 #endif
@@ -51,23 +101,19 @@
 
 static void desc_smp_init(struct irq_desc *desc, int node)
 {
-	desc->irq_data.node = node;
-	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
+	cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_clear(desc->pending_mask);
 #endif
-}
-
-static inline int desc_node(struct irq_desc *desc)
-{
-	return desc->irq_data.node;
+#ifdef CONFIG_NUMA
+	desc->irq_common_data.node = node;
+#endif
 }
 
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
 static inline void desc_smp_init(struct irq_desc *desc, int node) { }
-static inline int desc_node(struct irq_desc *desc) { return 0; }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
@@ -75,11 +121,13 @@
 {
 	int cpu;
 
+	desc->irq_common_data.handler_data = NULL;
+	desc->irq_common_data.msi_desc = NULL;
+
+	desc->irq_data.common = &desc->irq_common_data;
 	desc->irq_data.irq = irq;
 	desc->irq_data.chip = &no_irq_chip;
 	desc->irq_data.chip_data = NULL;
-	desc->irq_data.handler_data = NULL;
-	desc->irq_data.msi_desc = NULL;
 	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
 	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
 	desc->handle_irq = handle_bad_irq;
@@ -125,12 +173,22 @@
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	free_cpumask_var(desc->pending_mask);
 #endif
-	free_cpumask_var(desc->irq_data.affinity);
+	free_cpumask_var(desc->irq_common_data.affinity);
 }
 #else
 static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
+void irq_lock_sparse(void)
+{
+	mutex_lock(&sparse_irq_lock);
+}
+
+void irq_unlock_sparse(void)
+{
+	mutex_unlock(&sparse_irq_lock);
+}
+
 static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
 {
 	struct irq_desc *desc;
@@ -167,6 +225,12 @@
 
 	unregister_irq_proc(irq, desc);
 
+	/*
+	 * sparse_irq_lock also protects show_interrupts() and
+	 * kstat_irqs_usr(). Once we have deleted the descriptor from
+	 * the sparse tree we can free it. Lookups from /proc will
+	 * then fail to find the descriptor.
+	 */
 	mutex_lock(&sparse_irq_lock);
 	delete_irq_desc(irq);
 	mutex_unlock(&sparse_irq_lock);
@@ -278,7 +342,12 @@
 
 static void free_desc(unsigned int irq)
 {
-	dynamic_irq_cleanup(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
@@ -299,6 +368,20 @@
 	return -ENOMEM;
 }
 
+void irq_mark_irq(unsigned int irq)
+{
+	mutex_lock(&sparse_irq_lock);
+	bitmap_set(allocated_irqs, irq, 1);
+	mutex_unlock(&sparse_irq_lock);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_LEGACY
+void irq_init_desc(unsigned int irq)
+{
+	free_desc(irq);
+}
+#endif
+
 #endif /* !CONFIG_SPARSE_IRQ */
 
 /**
@@ -312,11 +395,66 @@
 
 	if (!desc)
 		return -EINVAL;
-	generic_handle_irq_desc(irq, desc);
+	generic_handle_irq_desc(desc);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(generic_handle_irq);
 
+#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+/**
+ * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
+ * @domain:	The domain in which to perform the lookup
+ * @hwirq:	The HW irq number to convert to a logical one
+ * @lookup:	Whether to perform the domain lookup or not
+ * @regs:	Register file coming from the low-level handling code
+ *
+ * Returns:	0 on success, or -EINVAL if conversion has failed
+ */
+int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
+			bool lookup, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned int irq = hwirq;
+	int ret = 0;
+
+	irq_enter();
+
+#ifdef CONFIG_IRQ_DOMAIN
+	if (lookup)
+		irq = irq_find_mapping(domain, hwirq);
+#endif
+
+	/*
+	 * Some hardware occasionally delivers bogus interrupts. Rather
+	 * than crashing, do something sensible.
+	 */
+	if (unlikely(!irq || irq >= nr_irqs)) {
+		ack_bad_irq(irq);
+		ret = -EINVAL;
+	} else {
+#ifndef __STOPWATCH_USE__
+		generic_handle_irq(irq);
+#else
+		if (irq > NR_IRQS) {
+			STOPWATCH_START(hardirq[NR_IRQS]);
+			generic_handle_irq(irq);
+			STOPWATCH_STOP(hardirq[NR_IRQS]);
+		} else {
+			STOPWATCH_START(hardirq[irq]);
+			generic_handle_irq(irq);
+			STOPWATCH_STOP(hardirq[irq]);
+		}
+#endif
+	}
+
+	STOPWATCH_START(hardirq[NR_IRQS + 1]);
+	irq_exit();
+	STOPWATCH_STOP(hardirq[NR_IRQS + 1]);
+	set_irq_regs(old_regs);
+	return ret;
+}
+#endif
+
 /* Dynamic interrupt handling */
 
 /**
@@ -363,6 +501,13 @@
 		if (from > irq)
 			return -EINVAL;
 		from = irq;
+	} else {
+		/*
+		 * For interrupts which are freely allocated the
+		 * architecture can enforce a lower bound on the @from
+		 * argument. x86 uses this to exclude the GSI space.
+		 */
+		from = arch_dynirq_lower_bound(from);
 	}
 
 	mutex_lock(&sparse_irq_lock);
@@ -389,30 +534,56 @@
 }
 EXPORT_SYMBOL_GPL(__irq_alloc_descs);
 
+#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 /**
- * irq_reserve_irqs - mark irqs allocated
- * @from:	mark from irq number
- * @cnt:	number of irqs to mark
+ * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
+ * @cnt:	number of interrupts to allocate
+ * @node:	node on which to allocate
  *
- * Returns 0 on success or an appropriate error code
+ * Returns an interrupt number > 0, or 0 if the allocation fails.
  */
-int irq_reserve_irqs(unsigned int from, unsigned int cnt)
+unsigned int irq_alloc_hwirqs(int cnt, int node)
 {
-	unsigned int start;
-	int ret = 0;
+	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);
 
-	if (!cnt || (from + cnt) > nr_irqs)
-		return -EINVAL;
+	if (irq < 0)
+		return 0;
 
-	mutex_lock(&sparse_irq_lock);
-	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
-	if (start == from)
-		bitmap_set(allocated_irqs, start, cnt);
-	else
-		ret = -EEXIST;
-	mutex_unlock(&sparse_irq_lock);
-	return ret;
+	for (i = irq; cnt > 0; i++, cnt--) {
+		if (arch_setup_hwirq(i, node))
+			goto err;
+		irq_clear_status_flags(i, _IRQ_NOREQUEST);
+	}
+	return irq;
+
+err:
+	for (i--; i >= irq; i--) {
+		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
+		arch_teardown_hwirq(i);
+	}
+	irq_free_descs(irq, cnt);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
+
+/**
+ * irq_free_hwirqs - Free irq descriptors and clean up the hardware
+ * @from:	Free from irq number
+ * @cnt:	number of interrupts to free
+ *
+ */
+void irq_free_hwirqs(unsigned int from, int cnt)
+{
+	int i, j;
+
+	for (i = from, j = cnt; j > 0; i++, j--) {
+		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
+		arch_teardown_hwirq(i);
+	}
+	irq_free_descs(from, cnt);
 }
+EXPORT_SYMBOL_GPL(irq_free_hwirqs);
+#endif
 
 /**
  * irq_get_next_irq - get next allocated irq number
@@ -475,20 +646,20 @@
 	return 0;
 }
 
-/**
- * dynamic_irq_cleanup - cleanup a dynamically allocated irq
- * @irq:	irq number to initialize
- */
-void dynamic_irq_cleanup(unsigned int irq)
+void kstat_incr_irq_this_cpu(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc_set_defaults(irq, desc, desc_node(desc), NULL);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
 }
 
+/**
+ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
+ * @irq:	The interrupt number
+ * @cpu:	The cpu number
+ *
+ * Returns the sum of interrupt counts on @cpu since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -497,11 +668,19 @@
 		*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
 
+/**
+ * kstat_irqs - Get the statistics for an interrupt
+ * @irq:	The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
 unsigned int kstat_irqs(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	int cpu;
-	int sum = 0;
+	unsigned int sum = 0;
 
 	if (!desc || !desc->kstat_irqs)
 		return 0;
@@ -509,3 +688,22 @@
 		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
 	return sum;
 }
+
+/**
+ * kstat_irqs_usr - Get the statistics for an interrupt
+ * @irq:	The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. Contrary to kstat_irqs() this can be called from any
+ * preemptible context. It's protected against concurrent removal of
+ * an interrupt descriptor when sparse irqs are enabled.
+ */
+unsigned int kstat_irqs_usr(unsigned int irq)
+{
+	unsigned int sum;
+
+	irq_lock_sparse();
+	sum = kstat_irqs(irq);
+	irq_unlock_sparse();
+	return sum;
+}
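
A note on the stopwatch instrumentation: DEFINE_STOPWATCH_ARRAY(), INIT_STOPWATCH_ARRAY(), STOPWATCH_START()/STOPWATCH_STOP(), stopwatch_show() and stopwatch_register() come from the vendor's out-of-tree stopwatch facility, not from mainline 3.10, and the include targets reconstructed above (<linux/seq_file.h>, <linux/module.h>, <linux/stopwatch.h>) are assumptions. Below is a minimal sketch of the semantics the patch appears to rely on, i.e. per-CPU min/avg/max latency accounting around a code region; all names, fields and the clock source are illustrative, not the vendor implementation:

/* Hypothetical <linux/stopwatch.h> sketch, assuming per-CPU accounting. */
#include <linux/kernel.h>
#include <linux/sched.h>	/* local_clock() */
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/string.h>

#define STOPWATCH_MICRO		1000ULL	/* report nanoseconds as us */

struct stopwatch {
	unsigned long long start_ns;	/* timestamp of the last START */
	unsigned long long min_ns, max_ns, sum_ns;
	unsigned long samples;
};

/*
 * One instance per possible CPU, so START/STOP need no locking in
 * IRQ context. A real implementation would likely use per-cpu data.
 */
struct stopwatch_set {
	struct stopwatch cpu[NR_CPUS];
};

#define DEFINE_STOPWATCH_ARRAY(name, n)	\
	struct stopwatch_set stopwatch_##name[n]

/* STOPWATCH_INSTANCE_CPU(hardirq[irq], c) -> stopwatch_hardirq[irq].cpu[c] */
#define STOPWATCH_INSTANCE_CPU(entry, c)	\
	(stopwatch_##entry.cpu[c])

#define STOPWATCH_START(entry) do {					\
	STOPWATCH_INSTANCE_CPU(entry, smp_processor_id()).start_ns =	\
		local_clock();						\
} while (0)

#define STOPWATCH_STOP(entry) do {					\
	struct stopwatch *sw =						\
		&STOPWATCH_INSTANCE_CPU(entry, smp_processor_id());	\
	unsigned long long d = local_clock() - sw->start_ns;		\
	if (!sw->samples || d < sw->min_ns)				\
		sw->min_ns = d;						\
	if (d > sw->max_ns)						\
		sw->max_ns = d;						\
	sw->sum_ns += d;						\
	sw->samples++;							\
} while (0)

#define INIT_STOPWATCH_ARRAY(name, n)	\
	memset(stopwatch_##name, 0, sizeof(stopwatch_##name))

/* Print one instance as "min avg max" in the requested unit. */
static inline void stopwatch_show(struct stopwatch *sw, struct seq_file *f,
				  unsigned long long unit)
{
	unsigned long long avg = sw->samples ? sw->sum_ns / sw->samples : 0;

	seq_printf(f, "\t%llu\t%llu\t%llu",
		   sw->min_ns / unit, avg / unit, sw->max_ns / unit);
}

/* Presumably creates a seq_file, e.g. under /proc, with n positions. */
int stopwatch_register(const char *name, int n,
		       int (*show)(struct seq_file *, void *));

Under these assumptions, stopwatch_register("hardirq", NR_IRQS + 2, stopwatch_hardirq_show) wires up a seq_file whose iterator walks positions 0 .. NR_IRQS + 1 and hands a pointer to the current position to the show callback, which is why stopwatch_hardirq_show() reads the index via *((loff_t *) v). The slot layout follows from __handle_domain_irq(): slots 0 .. NR_IRQS - 1 time the per-IRQ handlers, slot NR_IRQS collects out-of-range hardware interrupts (irq > NR_IRQS), and slot NR_IRQS + 1 times irq_exit(), i.e. softirq processing.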