--- zzzz-none-000/linux-5.15.111/kernel/irq/manage.c	2023-05-11 14:00:40.000000000 +0000
+++ puma7-arm-6670-761/linux-5.15.111/kernel/irq/manage.c	2024-02-07 09:28:08.000000000 +0000
@@ -6,6 +6,11 @@
  * This file contains driver APIs to the irq subsystem.
  */
 
+/*
+ * Includes Intel Corporation's changes/modifications dated: 2012-2017.
+ * Changed/modified portions - Copyright © 2012-2017, Intel Corporation.
+ */
+
 #define pr_fmt(fmt) "genirq: " fmt
 
 #include <linux/irq.h>
@@ -851,6 +856,26 @@
 	enable_irq(irq);
 }
 
+/**
+ * ack_irq - ACK handling of an irq
+ * @irq: Interrupt to ACK
+ *
+ * ACKs the selected interrupt line.
+ *
+ */
+void ack_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc)
+	{
+		return;
+	}
+
+	irq_ack(desc);
+}
+EXPORT_SYMBOL(ack_irq);
+
 static int set_irq_wake_real(unsigned int irq, unsigned int on)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -1454,6 +1479,10 @@
 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
 {
 	struct task_struct *t;
+#ifdef CONFIG_INTEL_IRQ_THREAD_CHANGE_PRIORITY
+	struct irq_desc *desc;
+	struct sched_param param = { 0, };
+#endif
 
 	if (!secondary) {
 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
@@ -1466,7 +1495,20 @@
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 
+#ifndef CONFIG_INTEL_IRQ_THREAD_CHANGE_PRIORITY
 	sched_set_fifo(t);
+#else
+	desc = irq_to_desc(irq);
+	if (desc->policy == SCHED_NORMAL)
+	{
+		set_user_nice(t, desc->sched_priority);
+	}
+	else
+	{
+		param.sched_priority = desc->sched_priority;
+		WARN_ON_ONCE(sched_setscheduler_nocheck(t, desc->policy, &param) != 0);
+	}
+#endif
 
 	/*
 	 * We keep the reference to the task struct even if
@@ -1867,6 +1909,58 @@
 	return ret;
 }
 
+#ifdef CONFIG_INTEL_IRQ_THREAD_CHANGE_PRIORITY
+
+/**
+ * irq_set_sched - set thread priority for an interrupt
+ * @irq: Interrupt line to set.
+ * @policy: irq thread policy for the interrupt (SCHED_NORMAL
+ *          | SCHED_FIFO)
+ * @priority: irq thread priority for the interrupt
+ *            For NORMAL: +19 - -20
+ *            For FIFO: 1 - 99.
+ *
+ * Used to statically set the interrupt thread policy and
+ * priority.
+ *
+ * On failure, it returns NULL.
+ * On success, it returns the IRQ thread task structure.
+ */
+struct task_struct *irq_set_sched(unsigned int irq, int policy, unsigned int priority)
+{
+	unsigned long flags;
+	struct sched_param param;
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+
+	if (!desc)
+		return NULL;
+
+	if (WARN(!desc->irq_data.chip, KERN_ERR "irq_set_sched before setup/request_irq: irq %u\n", irq))
+	{
+		irq_put_desc_busunlock(desc, flags);
+		return NULL;
+	}
+
+	desc->policy = policy;
+	desc->sched_priority = priority;
+
+	if (desc->policy == SCHED_NORMAL)
+	{
+		set_user_nice(desc->action->thread, desc->sched_priority);
+	}
+	else
+	{
+		param.sched_priority = desc->sched_priority;
+		sched_setscheduler_nocheck(desc->action->thread, desc->policy, &param);
+	}
+
+	irq_put_desc_busunlock(desc, flags);
+
+	return desc->action->thread;
+}
+EXPORT_SYMBOL_GPL(irq_set_sched);
+#endif
+
 /*
  * Internal function to unregister an irqaction - used to free
  * regular and special interrupts that are part of the architecture.
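
For context, a minimal sketch of how an out-of-tree driver might consume the two symbols this patch exports: ack_irq() and, when CONFIG_INTEL_IRQ_THREAD_CHANGE_PRIORITY is enabled, irq_set_sched(). The interrupt number, handler names, module name and the SCHED_FIFO priority value are placeholders, and the prototypes are declared locally because the patch does not show a corresponding header change; this sketch is not part of the patch itself.

/*
 * Hypothetical consumer of the exports added above. EXAMPLE_IRQ, the
 * handlers and the priority value 60 are illustrative only.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sched.h>

#define EXAMPLE_IRQ	42	/* placeholder interrupt number */

/* Normally these would come from a vendor header; declared here for the sketch. */
void ack_irq(unsigned int irq);
#ifdef CONFIG_INTEL_IRQ_THREAD_CHANGE_PRIORITY
struct task_struct *irq_set_sched(unsigned int irq, int policy,
				  unsigned int priority);
#endif

static irqreturn_t example_hard_handler(int irq, void *dev_id)
{
	/* Defer the real work to the per-irq kernel thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* Long-running handling runs here, in the irq thread. */
	return IRQ_HANDLED;
}

static int __init example_init(void)
{
	int ret;

	ret = request_threaded_irq(EXAMPLE_IRQ, example_hard_handler,
				   example_thread_fn, IRQF_ONESHOT,
				   "irq-sched-example", NULL);
	if (ret)
		return ret;

#ifdef CONFIG_INTEL_IRQ_THREAD_CHANGE_PRIORITY
	/*
	 * Move the irq thread to SCHED_FIFO priority 60 instead of the
	 * default chosen in setup_irq_thread(); NULL signals failure.
	 */
	if (!irq_set_sched(EXAMPLE_IRQ, SCHED_FIFO, 60))
		pr_warn("irq-sched-example: could not change irq thread scheduling\n");
#endif
	return 0;
}

static void __exit example_exit(void)
{
	free_irq(EXAMPLE_IRQ, NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");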