/* * * Copyright (C) 2006 AVM GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_PROC_FS) #include #endif #if defined(CONFIG_AVM_FASTIRQ) #define MAX_FASTIRQ_TO_LINUX_IPI_ENTRIES_ORDER 7 #define MAX_FASTIRQ_TO_LINUX_IPI_ENTRIES (1 << MAX_FASTIRQ_TO_LINUX_IPI_ENTRIES_ORDER) static int rte_linux_ipi; #define DEBUG_EXTENDED_IPI /** */ struct _firq_to_linux_ipi_queue { struct _firq_to_linux_ipi entry[MAX_FASTIRQ_TO_LINUX_IPI_ENTRIES]; atomic_t read_idx; atomic_t write_idx; unsigned int last_jiffies; unsigned int error_once; unsigned int reset_stat; /*--- Statistik nur im irq-Kontext reseten ---*/ unsigned int max_handled; unsigned int max_latency; unsigned int useless_trigger; unsigned long long trigger_cycle_sum; unsigned int trigger_cnt; unsigned int trigger_cycle; atomic_t queue_ovr; int initialized; #if defined(DEBUG_EXTENDED_IPI) unsigned long stat_count[max_ipi_type][2]; #endif/*--- #if defined(DEBUG_EXTENDED_IPI) ---*/ spinlock_t qlock; }; /** */ static inline unsigned int fastirq_queue_inc_idx(unsigned int idx) { return ((idx + 1) & (MAX_FASTIRQ_TO_LINUX_IPI_ENTRIES - 1)); } /** */ static inline unsigned int fastirq_queue_full(unsigned int read_idx, unsigned int write_idx) { if (write_idx >= read_idx) { return write_idx - read_idx; } return MAX_FASTIRQ_TO_LINUX_IPI_ENTRIES - read_idx + write_idx; } struct _firq_to_linux_ipi_queue __percpu *gFastirq_to_linux_ipi_queue; unsigned int panic_oops_on_fiq; #if defined(CONFIG_PROC_FS) static const char * const name_ipi_type[] = { [wake_up_type] "wake_up_type", [schedule_work_type] "schedule_work_type", [schedule_delayed_work_type] "schedule_delayed_work_type", [queue_work_on_type] "queue_work_on_type", [tasklet_hi_schedule_type] "tasklet_hi_schedule_type", [try_module_get_type] "try_module_get_type", [module_put_type] "module_put_type", [panic_type] "panic_type", [call_type] "call_type", [wake_up_state_type] "wake_up_state_type", }; #if defined(DEBUG_EXTENDED_IPI) /** */ static void display_extended_ipi_infos(int cpu, struct seq_file *m, struct _firq_to_linux_ipi_queue *pq) { unsigned int i; seq_printf(m, "[cpu=%d]Executed ipi-functions-sum%s:\n", cpu, pq->reset_stat ? "" : " (since last call)"); for (i = 0; i < max_ipi_type; i++) { if (pq->stat_count[i][0]) { seq_printf(m, "%26s: %16lu (%pS)\n", name_ipi_type[i], pq->stat_count[i][0], (void *)pq->stat_count[i][1]); } } } /** */ static void display_pending_ipi(int cpu, struct seq_file *m, struct _firq_to_linux_ipi_queue *pq) { register unsigned int write_idx, read_idx; unsigned long flags; struct _firq_to_linux_ipi *param; firq_spin_lock_irqsave(&pq->qlock, flags); write_idx = atomic_read(&pq->write_idx); read_idx = atomic_read(&pq->read_idx); if (read_idx != write_idx) { seq_printf(m, "[cpu=%d]%u pending ipi-functions:\n", cpu, fastirq_queue_full(read_idx, write_idx)); } while (read_idx != write_idx) { param = &pq->entry[read_idx]; seq_printf(m, "[%u] %26s: is pending since %lu s (%pS)\n", read_idx, name_ipi_type[param->ipi_func_type], (jiffies - param->ts_jiffies) / HZ, (void *)param->ret_ip); read_idx = fastirq_queue_inc_idx(read_idx); } firq_spin_unlock_irqrestore(&pq->qlock, flags); } #endif/*--- #if defined(DEBUG_EXTENDED_IPI) ---*/ /** */ static int fastirq_ipi_proc_stat(struct seq_file *m, void *data __maybe_unused) { unsigned int KHz = avm_get_cyclefreq() / 1000; int cpu; for_each_cpu(cpu, cpu_online_mask) { struct _firq_to_linux_ipi_queue *pq = per_cpu_ptr(gFastirq_to_linux_ipi_queue, cpu); if (pq->initialized == 0) { continue; } if (pq->max_handled) { seq_printf(m, "[cpu=%u]Yield-to-Linux-Statistic:\n", cpu); seq_printf(m, "\tMax-Burst-Executed: %20u\n", pq->max_handled); seq_printf(m, "\tMax-Trigger-Latency: %20lu us %s\n", avm_cycles_to_usec(pq->max_latency), pq->reset_stat ? "" : " (since last call)"); if (pq->useless_trigger) seq_printf(m, "\tUseless trigger: %20u\n", pq->useless_trigger); if (atomic_read(&pq->queue_ovr)) seq_printf(m, "\tQueue OVR: %20u\n", atomic_read(&pq->queue_ovr)); if (pq->trigger_cnt) { unsigned long long period = pq->trigger_cycle_sum; do_div(period, pq->trigger_cnt); do_div(period, KHz); seq_printf(m, "\tavg Trigger-Period: %20llu ms %s\n", period, pq->reset_stat ? "" : " (since last call)"); } #if defined(DEBUG_EXTENDED_IPI) if (pq->trigger_cnt) { display_extended_ipi_infos(cpu, m, pq); } #endif/*--- #if defined(DEBUG_EXTENDED_IPI) ---*/ } #if defined(DEBUG_EXTENDED_IPI) display_pending_ipi(cpu, m, pq); #endif/*--- #if defined(DEBUG_EXTENDED_IPI) ---*/ pq->reset_stat = 1; /*--- erst im Irq-Kontext reseten (somit geht Statistik im CRV nicht verloren falls IPI-Irq blockiert ist) ---*/ } return 0; } /** */ static void firq_to_linux_reset_stat(struct _firq_to_linux_ipi_queue *pq) { #if defined(DEBUG_EXTENDED_IPI) unsigned int i; for (i = 0; i < max_ipi_type; i++) { pq->stat_count[i][0] = 0; } #endif/*--- #if defined(DEBUG_EXTENDED_IPI) ---*/ pq->reset_stat = 0; pq->trigger_cycle_sum = 0; pq->trigger_cnt = 0; pq->max_latency = 0; } /** */ static int fastirq_ipi_proc_open(struct inode *inode, struct file *file) { return single_open(file, fastirq_ipi_proc_stat, NULL); } /** */ static const struct file_operations fastirq_ipi_proc_fops = { .open = fastirq_ipi_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /** */ static int fastirq_ipi_proc_init(void) { proc_create("fiq_ipi", 0, NULL, &fastirq_ipi_proc_fops); return 0; } #endif /*--- #if defined(CONFIG_PROC_FS) ---*/ /** */ static void fastirq_queue_init(struct _firq_to_linux_ipi_queue *pq) { firq_spin_lock_init(&pq->qlock); atomic_set(&pq->read_idx, 0); atomic_set(&pq->write_idx, 0); pq->initialized = 1; } /** * Kontext: FASTIRQ */ static int fastirq_enqueue(struct _firq_to_linux_ipi_queue *pq, struct _firq_to_linux_ipi *param, unsigned long ret_ip) { register unsigned int write_idx, read_idx, post_write_idx; unsigned long flags; BUG_ON(param->ipi_func_type >= max_ipi_type); rmb(); firq_spin_lock_irqsave(&pq->qlock, flags); write_idx = atomic_read(&pq->write_idx); read_idx = atomic_read(&pq->read_idx); post_write_idx = fastirq_queue_inc_idx(write_idx); if (unlikely(post_write_idx == read_idx)) { firq_spin_unlock_irqrestore(&pq->qlock, flags); if (pq->error_once == 0) { pq->error_once = pq->last_jiffies | 0x1; pr_err("[%s] ERROR ipi-queue overflow for %s %pS %u %u (last linux-ipi-irq before %lu s)\n", __func__, name_ipi_type[param->ipi_func_type], (void *)ret_ip, write_idx, read_idx, (jiffies - pq->last_jiffies) / HZ); } else if (((jiffies | 0x1) - pq->error_once) > (40 * HZ)) { /*--- ... nun reichts aber ... ---*/ pq->error_once = jiffies | 0x1; firq_panic("ERROR FASTIRQ-IPI-IRQ do not work\n"); } return 1; } param->ret_ip = ret_ip; param->ts_jiffies = jiffies; param->cycle = avm_get_cycles(); memcpy(&pq->entry[write_idx], param, sizeof(struct _firq_to_linux_ipi)); atomic_set(&pq->write_idx, post_write_idx); firq_spin_unlock_irqrestore(&pq->qlock, flags); return 0; } /** * Kontext: Linux-IRQ */ static int fastirq_dequeue(struct _firq_to_linux_ipi_queue *pq, struct _firq_to_linux_ipi *param) { register unsigned int write_idx, read_idx; spin_lock(&pq->qlock); rmb(); write_idx = atomic_read(&pq->write_idx); read_idx = atomic_read(&pq->read_idx); if (write_idx == read_idx) { spin_unlock(&pq->qlock); return 0; } memcpy(param, &pq->entry[read_idx], sizeof(struct _firq_to_linux_ipi)); #if defined(DEBUG_EXTENDED_IPI) pq->stat_count[param->ipi_func_type][0]++; pq->stat_count[param->ipi_func_type][1] = param->ret_ip; #endif/*--- #if defined(DEBUG_EXTENDED_IPI) ---*/ atomic_set(&pq->read_idx, fastirq_queue_inc_idx(read_idx)); spin_unlock(&pq->qlock); return 1; } /** * Aufruf nur aus firq-Kontext! * ret: 0 ok */ int firq_trigger_linux_ipi(int cpu, struct _firq_to_linux_ipi *obj) { int ret = 0; struct _firq_to_linux_ipi_queue *pq = per_cpu_ptr(gFastirq_to_linux_ipi_queue, cpu); BUG_ON(!firq_is_avm_rte()); if (unlikely(!pq)) { ret = -1; return ret; } if (unlikely(fastirq_enqueue(pq, obj, _RET_IP_))) { atomic_inc(&pq->queue_ovr); ret = -1; } avm_rte_ipi_trigger_on(cpu, rte_linux_ipi); return ret; } EXPORT_SYMBOL(firq_trigger_linux_ipi); /** * der (Linux-)IRQ-Kontext fuer Fastirq-to_Linux-IPI (per-cpu) */ static irqreturn_t fastirq_to_linux_ipi_irq(unsigned int ipi, struct pt_regs *regs, void *ctx) { struct _firq_to_linux_ipi_queue *pq = raw_cpu_ptr(gFastirq_to_linux_ipi_queue); struct _firq_to_linux_ipi params; unsigned int max_handled = 0, timediff; /*--- pr_err("[%s] read=%u write=%u\n", __func__, pq->read_idx, pq->write_idx); ---*/ while (fastirq_dequeue(pq, ¶ms)) { timediff = avm_get_cycles() - params.cycle; if (timediff > pq->max_latency) { pq->max_latency = timediff; } max_handled++; /*--- pr_err("[%s] type %u read=%u write=%u\n", __func__, params.ipi_func_type, pq->read_idx, pq->write_idx); ---*/ switch (params.ipi_func_type) { case wake_up_type: /*--- pr_err("[%s] wake_up_trigger(%p)\n", __func__, params.u.wake_up_param.q); ---*/ __wake_up(params.u.wake_up_param.q, params.u.wake_up_param.mode, params.u.wake_up_param.nr_exclusive, params.u.wake_up_param.key); break; case schedule_work_type: schedule_work(params.u.schedule_work_param.work); break; case schedule_delayed_work_type: schedule_delayed_work(params.u.schedule_delayed_work_param.dwork, params.u.schedule_delayed_work_param.delay); break; case queue_work_on_type: queue_work_on(params.u.queue_work_on_param.cpu, params.u.queue_work_on_param.wq, params.u.queue_work_on_param.work); break; case tasklet_hi_schedule_type: tasklet_hi_schedule(params.u.tasklet_hi_schedule_param.t); break; case try_module_get_type: /*--- pr_err("%s: try_module_get(%p)\n", __func__, params.u.module_param.module); ---*/ try_module_get(params.u.module_param.module); break; case module_put_type: /*--- pr_err("%s: module_put(%p)\n", __func__, params.u.module_param.module); ---*/ module_put(params.u.module_param.module); break; case panic_type: panic("%s\n", params.u.panic_param.debugstr); break; case call_type: if (params.u.call_param.func) { params.u.call_param.func(params.u.call_param.func_param); } break; case wake_up_state_type: /*--- pr_err("[%s] wake_up_state_type(%s:%p, %x)\n", __func__, params.u.wake_up_state_param.tsk->comm, params.u.wake_up_state_param.tsk, params.u.wake_up_state_param.state); ---*/ wake_up_state(params.u.wake_up_state_param.tsk, params.u.wake_up_state_param.state); put_task_struct(params.u.wake_up_state_param.tsk); break; default: pr_err("%s:unknown type %u\n", __func__, params.ipi_func_type); break; } } if (pq->reset_stat) { firq_to_linux_reset_stat(pq); } if (pq->max_handled < max_handled) { pq->max_handled = max_handled; /*--- pr_err("%s: max queuefull %u\n", __func__, max_handled); ---*/ } else if (max_handled == 0) { pq->useless_trigger++; } if (pq->trigger_cycle) { pq->trigger_cycle_sum += (unsigned long long)(avm_get_cycles() - pq->trigger_cycle); } pq->trigger_cnt++; pq->trigger_cycle = avm_get_cycles(); pq->last_jiffies = jiffies; pq->error_once = 0; return IRQ_HANDLED; } /** * very important! * if ipi-functions used in fastirq-context use it to flush/sync ipi-queues before * any free of linux-depend-data-structs (e.g. workitem) * (prevent use-after-free-accesses) * only linux-kthread-context * * timeout: in jiffies * * ret: 1 all (cpu-)queues) synced * 0 timeout * */ int firq_to_linux_sync_ipi(int timeout) { struct _firq_to_linux_ipi params; struct cpumask cpu_ipi_mask; int cpu, ret = -ERESTARTSYS; unsigned long end_jiffies = jiffies + timeout; memset(¶ms, 0, sizeof(params)); params.ipi_func_type = call_type; /* nop - only for synchronisation */ cpumask_copy(&cpu_ipi_mask, cpu_online_mask); for_each_cpu(cpu, &cpu_ipi_mask) { struct _firq_to_linux_ipi_queue *pq = per_cpu_ptr(gFastirq_to_linux_ipi_queue, cpu); fastirq_enqueue(pq, ¶ms, _RET_IP_); avm_rte_ipi_trigger_on(cpu, rte_linux_ipi); } for (;;) { for_each_cpu(cpu, &cpu_ipi_mask) { struct _firq_to_linux_ipi_queue *pq = per_cpu_ptr(gFastirq_to_linux_ipi_queue, cpu); if (atomic_read(&pq->write_idx) == atomic_read(&pq->read_idx)) { /*--- queue-empty: since this point all ipis executed ---*/ cpumask_clear_cpu(cpu, &cpu_ipi_mask); pr_err("%s: cpu=%u: %p queue empty\n", __func__, cpu, pq); } else { /*--- not really necessary, but ... ---*/ avm_rte_ipi_trigger_on(cpu, rte_linux_ipi); } } if (cpumask_empty(&cpu_ipi_mask)) { /*--- always synced ---*/ /*--- pr_err("%s: all queues flushed\n", __func__); ---*/ ret = 1; break; } if (time_after(jiffies, end_jiffies)) { pr_err("%s: cpu=%u: timeout\n", __func__, cpu); ret = 0; break; } schedule_timeout(1); } return ret; } EXPORT_SYMBOL(firq_to_linux_sync_ipi); /** * installiere auf jeder CPU ein fastirq-to-linux-irq */ static int fastirq_to_linux_ipi_init(void) { /*--- struct cpumask tmask; ---*/ int cpu, ret; gFastirq_to_linux_ipi_queue = alloc_percpu(struct _firq_to_linux_ipi_queue); if (!gFastirq_to_linux_ipi_queue) { pr_err("%s: memory allocation failed", __func__); return 1; } for_each_cpu(cpu, cpu_online_mask) { struct _firq_to_linux_ipi_queue *pq = per_cpu_ptr(gFastirq_to_linux_ipi_queue, cpu); fastirq_queue_init(pq); } ret = avm_rte_ipi_request(AVM_RTE_IPI_ALLOC_NR, fastirq_to_linux_ipi_irq, NULL, 0, 0x80, "RTE linux"); if (ret < 0) pr_err("%s: error on install irq=%u: %d\n", __func__, 11, ret); else rte_linux_ipi = ret; #if defined(CONFIG_PROC_FS) fastirq_ipi_proc_init(); #endif/*--- #if defined(CONFIG_PROC_FS) ---*/ return 0; } late_initcall(fastirq_to_linux_ipi_init); #endif/*--- #if defined(CONFIG_AVM_FASTIRQ) ---*/