// SPDX-License-Identifier: GPL-2.0+
#define pr_fmt(fmt) "[rte_ipi] " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/barrier.h>

#include "rte_gic.h"

enum {
	_IPI_FLAG_USABLE = 1 << 23,
	_IPI_FLAG_BUSY = 1 << 24,
};

struct ipi_desc {
	int nr;
	int flags;
	unsigned int prio;
	const char *name;
	avm_rte_ipi_handler_t handler;
	void *handler_ctx;
};

/*
 * The ARM GIC provides 16 SGIs. However, some are reserved by Linux for
 * its own use, and others may be used by the TZ.
 *
 * The Linux-owned IPIs can already be reserved here. IPIs that are left
 * unused by the TZ can then be declared in the device tree.
 *
 * The min/max values are inclusive.
 */
#define IPI_MIN_USABLE (NR_IPI + 1)
#define IPI_MAX_USABLE 14
#define IPI_DESC_COUNT (IPI_MAX_USABLE - IPI_MIN_USABLE + 1)
#define IPI_IDX(_ipi) ((_ipi) - IPI_MIN_USABLE)

struct ipi_stats {
	int irq[IPI_DESC_COUNT];
};

static DEFINE_MUTEX(ipi_mutex);
static struct ipi_desc ipi_descs[IPI_DESC_COUNT];
static DEFINE_PER_CPU_ALIGNED(struct ipi_stats, ipi_stats);

#define __IPI_IRQ_STAT(cpu, _ipi) per_cpu(ipi_stats.irq[IPI_IDX(_ipi)], cpu)

static inline unsigned int cpumask_to_map(const struct cpumask *mask)
{
	int cpu;
	unsigned int map = 0;

	BUILD_BUG_ON(NR_CPUS >= 32);

	for_each_cpu(cpu, mask)
		map |= (1 << cpu);

	return map;
}

static void setup_ipi(void *ctx)
{
	struct ipi_desc *desc = ctx;
	bool is_secure = desc->flags & AVM_RTE_IPI_SECURE;

	avm_gic_fiq_configure(desc->nr, cpu_possible_mask, desc->prio,
			      1 /* EDGE rising */, !is_secure);
}

static void disable_ipi(void *ctx)
{
	struct ipi_desc *desc = ctx;

	avm_gic_fiq_disable(desc->nr, cpumask_to_map(cpu_possible_mask));
}

static inline struct ipi_desc *ipi_to_desc(int ipi_nr)
{
	if (ipi_nr < IPI_MIN_USABLE || ipi_nr > IPI_MAX_USABLE)
		return NULL;

	return &ipi_descs[IPI_IDX(ipi_nr)];
}

static inline struct ipi_desc *find_unused_ipi(void)
{
	struct ipi_desc *desc;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ipi_descs); i++) {
		desc = &ipi_descs[i];

		if (desc->flags & _IPI_FLAG_BUSY)
			continue;

		if (desc->flags & _IPI_FLAG_USABLE)
			return desc;
	}

	return NULL;
}

int avm_rte_ipi_request(int ipi, avm_rte_ipi_handler_t handler, void *ctx,
			int flags, unsigned int prio, const char *name)
{
	struct ipi_desc *desc;
	int ret;

	mutex_lock(&ipi_mutex);

	if (ipi == AVM_RTE_IPI_ALLOC_NR) {
		desc = find_unused_ipi();
		if (!desc) {
			ret = -ENOSPC;
			goto out;
		}
	} else {
		desc = ipi_to_desc(ipi);
		if (!desc || !(desc->flags & _IPI_FLAG_USABLE)) {
			ret = -EINVAL;
			goto out;
		}

		if (desc->flags & _IPI_FLAG_BUSY) {
			ret = -EBUSY;
			goto out;
		}
	}

	desc->flags = flags | _IPI_FLAG_USABLE;
	desc->prio = prio;
	desc->name = name;
	desc->handler = handler;
	desc->handler_ctx = ctx;

	/*
	 * IPIs are CPU-local, so we need to set them up on all CPUs.
	 */
	on_each_cpu(setup_ipi, desc, true);

	pr_notice("Requested ipi %s (nr=%d, prio=%02x, flags=0x%08x)\n",
		  desc->name, desc->nr, desc->prio, desc->flags);

	desc->flags |= _IPI_FLAG_BUSY;
	ret = desc->nr;

out:
	mutex_unlock(&ipi_mutex);
	return ret;
}
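
/*
 * Usage sketch for the request path. This is illustrative only:
 * "demo_handler" and the priority 0x80 are made-up names/values, and the
 * handler signature is assumed to match the invocation in
 * avm_rte_ipi_handle() below.
 *
 *	static irqreturn_t demo_handler(int ipi, struct pt_regs *regs, void *ctx)
 *	{
 *		return IRQ_HANDLED;	// runs on each CPU that received the IPI
 *	}
 *
 *	int nr = avm_rte_ipi_request(AVM_RTE_IPI_ALLOC_NR, demo_handler, NULL,
 *				     0, 0x80, "demo");
 *
 * On success the allocated SGI number is returned (-ENOSPC if none is
 * free; -EINVAL/-EBUSY for an unusable or already-claimed fixed number).
 * That number is then passed to avm_rte_ipi_trigger() and
 * avm_rte_ipi_free().
 */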

int avm_rte_ipi_free(int ipi)
{
	struct ipi_desc *desc;
	int err = 0;

	if (ipi < 0)
		return -EINVAL;

	mutex_lock(&ipi_mutex);

	desc = ipi_to_desc(ipi);
	if (!desc || !(desc->flags & _IPI_FLAG_USABLE) ||
	    !(desc->flags & _IPI_FLAG_BUSY)) {
		err = -EINVAL;
		goto out;
	}

	on_each_cpu(disable_ipi, desc, true);

	pr_notice("Free ipi %s (nr=%d)\n", desc->name, desc->nr);

	desc->prio = 0;
	desc->name = NULL;
	desc->handler = NULL;
	desc->handler_ctx = NULL;
	desc->flags = _IPI_FLAG_USABLE;

out:
	mutex_unlock(&ipi_mutex);
	return err;
}

int avm_rte_ipi_trigger(const struct cpumask *mask, unsigned int ipi)
{
	struct ipi_desc *desc;
	unsigned int map;

	/*
	 * There is no need to lock against adding/removing IPIs here.
	 *
	 * 1) On add:
	 *
	 *    If somebody triggers the IPI before the registration has
	 *    completed, we may or may not see it as usable. In either
	 *    case the registration is not finished yet, so at worst the
	 *    IPI is delivered early.
	 *
	 * 2) On remove:
	 *
	 *    If a remove is in progress we may see a valid registration
	 *    here and trigger the IPI.
	 *
	 *    However, this is safe, as the remove only completes once
	 *    all CPUs have disabled the corresponding IPI pin. Our
	 *    spurious trigger will therefore be masked away.
	 */
	desc = ipi_to_desc(ipi);
	if (!desc || !(desc->flags & _IPI_FLAG_BUSY))
		return -EINVAL;

	map = cpumask_to_map(mask);
	if (map & ~0xF)
		return -EINVAL;

	pr_debug("trigger IPI %u on %*pbl\n", ipi, cpumask_pr_args(mask));

	dmb(ishst);

	/* We cannot simply trigger a secure IPI; we need to become secure first. */
	if (desc->flags & AVM_RTE_IPI_SECURE)
		rte_gicd_write(GIC_DIST_SOFTINT, (map << 16) | desc->nr);
	/* We may be in secure mode, so set SATT to request a non-secure IPI. */
	else
		rte_gicd_write(GIC_DIST_SOFTINT, (map << 16) | (1 << 15) | desc->nr);

	return 0;
}
EXPORT_SYMBOL(avm_rte_ipi_trigger);

int __init avm_rte_ipi_init(void)
{
	struct device_node *np;
	int i, count, usable = 0;

	for (i = IPI_MIN_USABLE; i <= IPI_MAX_USABLE; i++)
		ipi_descs[IPI_IDX(i)].nr = i;

	np = of_find_compatible_node(NULL, NULL, "avm,rt_framework");
	if (!np)
		return -EINVAL;

	count = of_property_count_u32_elems(np, "ipi-gic-sgi");
	for (i = 0; i < count; i++) {
		u32 ipi;
		struct ipi_desc *desc;

		of_property_read_u32_index(np, "ipi-gic-sgi", i, &ipi);

		desc = ipi_to_desc(ipi);
		if (!desc) {
			pr_err("Invalid IPI number %u\n", ipi);
			continue;
		}

		desc->flags = _IPI_FLAG_USABLE;
		usable++;
	}

	of_node_put(np);

	pr_info("Available IPIs %d\n", usable);

	return 0;
}
core_initcall(avm_rte_ipi_init);
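
/*
 * Trigger/teardown sketch, reusing the hypothetical "nr" from the request
 * sketch above (illustrative only):
 *
 *	struct cpumask mask;
 *
 *	cpumask_copy(&mask, cpu_online_mask);
 *	cpumask_clear_cpu(smp_processor_id(), &mask);	// all CPUs but us
 *	avm_rte_ipi_trigger(&mask, nr);
 *	...
 *	avm_rte_ipi_free(nr);	// masks the SGI on all CPUs again
 *
 * Note that avm_rte_ipi_trigger() rejects masks beyond CPUs 0-3 (the
 * "map & ~0xF" check above).
 */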

irqreturn_t avm_rte_ipi_handle(unsigned int ipi, struct pt_regs *regs)
{
	struct ipi_desc *desc;
	irqreturn_t ret;

	/*
	 * There is no need to lock against adding/removing IPIs here.
	 *
	 * 1) On add:
	 *
	 *    Nobody can trigger this IPI while the desc is invalid.
	 *
	 * 2) On remove:
	 *
	 *    The remove disables the IPI on every core via a regular
	 *    (lowest-priority, non-RTE) Linux IPI and only returns once
	 *    that has completed. This ensures that the IPI is masked by
	 *    the time the desc becomes invalid.
	 */
	desc = ipi_to_desc(ipi);
	if (!desc || !(desc->flags & _IPI_FLAG_BUSY))
		return IRQ_NONE;

	__IPI_IRQ_STAT(raw_smp_processor_id(), desc->nr)++;

	if (desc->flags & AVM_RTE_IPI_SECURE) {
		ret = desc->handler(desc->nr, regs, desc->handler_ctx);
	} else {
		irq_enter();
		ret = desc->handler(desc->nr, regs, desc->handler_ctx);
		irq_exit();
	}

	return ret;
}

void avm_rte_ipi_show_list(struct seq_file *p, int prec, bool secure)
{
	struct ipi_desc *desc;
	int i, cpu;

	for (i = 0; i < IPI_DESC_COUNT; i++) {
		desc = &ipi_descs[i];

		if (!(desc->flags & _IPI_FLAG_BUSY))
			continue;

		if (secure && !(desc->flags & AVM_RTE_IPI_SECURE))
			continue;

		if (!secure && (desc->flags & AVM_RTE_IPI_SECURE))
			continue;

		seq_printf(p, "%*s%u: ", prec - 1, "IPI", desc->nr);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", __IPI_IRQ_STAT(cpu, desc->nr));

		seq_printf(p, " %s\n", desc->name);
	}
}
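
/*
 * Example of the lines avm_rte_ipi_show_list() emits into a
 * /proc/interrupts-style seq_file (the counts and the name are made up):
 *
 *	IPI8:        123        456   demo
 *
 * One "%10u" column is printed per online CPU, followed by the name the
 * IPI was registered with.
 */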