--- zzzz-none-000/linux-5.15.111/arch/x86/kernel/apic/vector.c	2023-05-11 14:00:40.000000000 +0000
+++ puma7-atom-6670-761/linux-5.15.111/arch/x86/kernel/apic/vector.c	2024-02-07 10:22:35.000000000 +0000
@@ -13,6 +13,10 @@
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/slab.h>
+#ifdef CONFIG_INTEL_PCI_MULTI_MSI
+#include <linux/pci.h>	/* struct pci_dev */
+#include <linux/msi.h>	/* struct msi_desc */
+#endif /* CONFIG_INTEL_PCI_MULTI_MSI */
 #include <asm/irqdomain.h>
 #include <asm/hw_irq.h>
 #include <asm/traps.h>
@@ -34,7 +38,12 @@
 	unsigned int		move_in_progress	: 1,
 				is_managed		: 1,
 				can_reserve		: 1,
+#ifdef CONFIG_INTEL_PCI_MULTI_MSI
+				has_reserved		: 1,
+				multimsi_enable		: 1;
+#else
 				has_reserved		: 1;
+#endif /* CONFIG_INTEL_PCI_MULTI_MSI */
 };
 
 struct irq_domain *x86_vector_domain;
@@ -375,7 +384,10 @@
 	/* If the interrupt has a global reservation, nothing to do */
 	if (apicd->has_reserved)
 		return;
-
+#ifdef CONFIG_INTEL_PCI_MULTI_MSI
+	if (apicd->multimsi_enable)
+		return;
+#endif /* CONFIG_INTEL_PCI_MULTI_MSI */
 	raw_spin_lock_irqsave(&vector_lock, flags);
 	clear_irq_vector(irqd);
 	if (apicd->can_reserve)
@@ -450,7 +462,10 @@
 
 	trace_vector_activate(irqd->irq, apicd->is_managed,
 			      apicd->can_reserve, reserve);
-
+#ifdef CONFIG_INTEL_PCI_MULTI_MSI
+	if (apicd->multimsi_enable)
+		return 0;
+#endif /* CONFIG_INTEL_PCI_MULTI_MSI */
 	raw_spin_lock_irqsave(&vector_lock, flags);
 	if (!apicd->can_reserve && !apicd->is_managed)
 		assign_irq_vector_any_locked(irqd);
@@ -528,6 +543,138 @@
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return realloc;
 }
+#ifdef CONFIG_INTEL_PCI_MULTI_MSI
+static int
+assign_vector_locked_block(struct irq_data *irqd, const struct cpumask *dest, int count)
+{
+	struct apic_chip_data *apicd = apic_chip_data(irqd);
+	bool resvd = apicd->has_reserved;
+	unsigned int cpu = apicd->cpu;
+	int vector = apicd->vector;
+
+	lockdep_assert_held(&vector_lock);
+
+	/*
+	 * If the current target CPU is online and in the new requested
+	 * affinity mask, there is no point in moving the interrupt from
+	 * one CPU to another.
+	 */
+	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
+		return 0;
+
+	/*
+	 * Careful here. @apicd might either have move_in_progress set or
+	 * be enqueued for cleanup. Assigning a new vector would either
+	 * leave a stale vector on some CPU around or in case of a pending
+	 * cleanup corrupt the hlist.
+	 */
+	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
+		return -EBUSY;
+
+	vector = irq_matrix_alloc_block(vector_matrix, dest, resvd, &cpu, count);
+	trace_vector_alloc(irqd->irq, vector, resvd, vector);
+	if (vector < 0)
+		return vector;
+	apic_update_vector(irqd, vector, cpu);
+	apic_update_irq_cfg(irqd, vector, cpu);
+
+	return 0;
+}
+
+static int assign_irq_vector_block(struct irq_data *irqd, const struct cpumask *dest, int count)
+{
+	unsigned long flags;
+	int ret;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	cpumask_and(vector_searchmask, dest, cpu_online_mask);
+	ret = assign_vector_locked_block(irqd, vector_searchmask, count);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}
+
+static int x86_vector_alloc_irqs_block(struct irq_domain *domain, unsigned int virq,
+				       unsigned int nr_irqs, void *arg)
+{
+	struct irq_alloc_info *info = arg;
+	struct apic_chip_data *apicd, *first_apicd;
+	struct irq_data *irqd;
+	int i, err, node;
+
+	/*
+	 * Catch any attempt to touch the cascade interrupt on a PIC
+	 * equipped system.
+	 */
+	if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY &&
+			 virq == PIC_CASCADE_IR))
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irqd = irq_domain_get_irq_data(domain, virq + i);
+		BUG_ON(!irqd);
+		node = irq_data_get_node(irqd);
+		WARN_ON_ONCE(irqd->chip_data);
+		apicd = alloc_apic_chip_data(node);
+		if (unlikely(!apicd)) {
+			err = -ENOMEM;
+			goto error;
+		}
+
+		apicd->irq = virq + i;
+		apicd->multimsi_enable = true;
+		irqd->chip = &lapic_controller;
+		irqd->chip_data = apicd;
+		irqd->hwirq = virq + i;
+		irqd_set_single_target(irqd);
+		/*
+		 * Prevent that any of these interrupts is invoked in
+		 * non interrupt context via e.g. generic_handle_irq()
+		 * as that can corrupt the affinity move state.
+		 */
+		irqd_set_handle_enforce_irqctx(irqd);
+
+		/* Don't invoke affinity setter on deactivated interrupts */
+		irqd_set_affinity_on_activate(irqd);
+
+		/*
+		 * Legacy vectors are already assigned when the IOAPIC
+		 * takes them over. They stay on the same vector. This is
+		 * required for check_timer() to work correctly as it might
+		 * switch back to legacy mode. Only update the hardware
+		 * config.
+		 */
+		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
+			if (!vector_configure_legacy(virq + i, irqd, apicd))
+				continue;
+		}
+
+		if (i == 0) {
+			first_apicd = apicd;
+			err = assign_irq_vector_block(irqd, cpu_online_mask,
+						      nr_irqs);
+		} else {
+			apic_update_vector(irqd, first_apicd->vector + i,
+					   first_apicd->cpu);
+			apic_update_irq_cfg(irqd, first_apicd->vector + i,
+					    first_apicd->cpu);
+			err = 0;
+		}
+		trace_vector_setup(virq + i, false, err);
+		if (err) {
+			irqd->chip_data = NULL;
+			free_apic_chip_data(apicd);
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	x86_vector_free_irqs(domain, virq, i + (i < nr_irqs));
+	return err;
+}
+#endif /* CONFIG_INTEL_PCI_MULTI_MSI */
+
 static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 				 unsigned int nr_irqs, void *arg)
 {
@@ -540,8 +687,15 @@
 		return -ENXIO;
 
 	/* Currently vector allocator can't guarantee contiguous allocations */
-	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
+	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && (nr_irqs > 1)) {
+#ifdef CONFIG_INTEL_PCI_MULTI_MSI
+		if (msi_desc_to_pci_dev(info->desc)->mmsi_support) {
+			pr_debug("allocating contiguous virtual irqs vector: %d-%d...\n", virq, virq + nr_irqs - 1);
+			return x86_vector_alloc_irqs_block(domain, virq, nr_irqs, arg);
+		}
+#endif /* CONFIG_INTEL_PCI_MULTI_MSI */
 		return -ENOSYS;
+	}
 
 	/*
 	 * Catch any attempt to touch the cascade interrupt on a PIC
@@ -860,6 +1014,15 @@
 	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
 		return -EIO;
 
+#ifdef CONFIG_INTEL_PCI_MULTI_MSI
+	if (irqd->common &&
+	    irqd->common->msi_desc &&
+	    irqd->common->msi_desc->msi_attrib.multiple) {
+		/* TODO: setting cpu affinity for multiple msi isn't supported yet */
+		return -ENOSYS;
+	}
+#endif /* CONFIG_INTEL_PCI_MULTI_MSI */
+
 	raw_spin_lock(&vector_lock);
 	cpumask_and(vector_searchmask, dest, cpu_online_mask);
 	if (irqd_affinity_is_managed(irqd))
@@ -867,7 +1030,8 @@
 	else
 		err = assign_vector_locked(irqd, vector_searchmask);
 	raw_spin_unlock(&vector_lock);
-	return err ? err : IRQ_SET_MASK_OK;
+
+	return err ? err : IRQ_SET_MASK_OK;
 }
 
 #else