/*
 * Copyright (C) 2006 AVM GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _AVM_FIQ_OS_H_
#define _AVM_FIQ_OS_H_

#include
#include
#include
#include
#include
#include
#include
#include
#include

#define firq_is_avm_rte()                       (is_cpu_mode_fiq() || in_avm_wdt_handling())
#define firq_is_avm_rte_restricted_mem_access() (is_cpu_mode_fiq())

#define firq_spin_lock_init(_lock)      spin_lock_init(_lock)
#define firq_raw_spin_lock_init(_rlock) raw_spin_lock_init(_rlock)

extern unsigned int panic_oops_on_fiq;

/**
 * Attention! All of the following functions must be used consistently on a
 * given lock instance, i.e. do not mix firq_spin_lock_..(mylock) with
 * spin_lock_..(mylock) when using the same mylock.
 */
#if defined(CONFIG_AVM_FASTIRQ_DEBUG)
static inline void __arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int limit = avm_get_cyclefreq() * 5; /* sec */
	unsigned int tstart = avm_get_cycles();
	unsigned int count = 0;

	while (!arch_spin_trylock(lock)) {
		if ((avm_get_cycles() - tstart) > limit) {
			//printk("%s: %x %x %x\n", __func__, avm_get_cycles(), tstart, count);
		}
		count++;
	}
}
#else /*--- #if defined(CONFIG_AVM_FASTIRQ_DEBUG) ---*/
#define __arch_spin_lock(lock) arch_spin_lock(lock)
#endif /*--- #if defined(CONFIG_AVM_FASTIRQ_DEBUG) ---*/

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_spin_lock(spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&lock->rlock.raw_lock);
		return;
	}
	spin_lock(lock);
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_spin_unlock(spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->rlock.raw_lock);
		return;
	}
	spin_unlock(lock);
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_spin_lock_bh(spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&lock->rlock.raw_lock);
		return;
	}
	spin_lock_bh(lock);
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_spin_unlock_bh(spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->rlock.raw_lock);
		return;
	}
	spin_unlock_bh(lock);
}

/**
 * Special lock: may also be called from FASTIRQ context
 * - protects against IRQ and FIRQ
 */
#define firq_spin_lock_irqsave(lock, flags) \
	do { \
		flags = avm_arch_local_fiq_and_iq_save(); \
		if (firq_is_avm_rte()) { \
			__arch_spin_lock(&((lock)->rlock.raw_lock)); \
		} else { \
			spin_lock(lock); \
		} \
	} while (0)

/**
 * Special lock: may also be called from FASTIRQ context
 * - protects against IRQ and FIRQ
 */
#define firq_spin_trylock_irqsave(lock, flags) \
	({ \
		int ret; \
		flags = avm_arch_local_fiq_and_iq_save(); \
		if (firq_is_avm_rte()) { \
			ret = arch_spin_trylock(&((lock)->rlock.raw_lock)); \
		} else { \
			ret = spin_trylock(lock); \
		} \
		if (ret == 0) \
			avm_arch_local_fiq_restore(flags); \
		ret ? 1 : 0; \
	})
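/*
 * Usage sketch (illustrative only, not part of the API): a lock that is
 * taken both from normal Linux context and from a FASTIRQ handler must use
 * the firq_* primitives consistently on both paths. The names my_firq_lock,
 * my_fiq_handler() and my_linux_path() are hypothetical.
 *
 *	static DEFINE_SPINLOCK(my_firq_lock);
 *
 *	static void my_fiq_handler(void)	// runs in FASTIRQ context
 *	{
 *		unsigned long flags;
 *
 *		firq_spin_lock_irqsave(&my_firq_lock, flags);
 *		// ... touch data shared with Linux context ...
 *		firq_spin_unlock_irqrestore(&my_firq_lock, flags);
 *	}
 *
 *	static void my_linux_path(void)		// runs in Linux context
 *	{
 *		unsigned long flags;
 *
 *		// also use firq_spin_lock_irqsave() here - never plain
 *		// spin_lock_irqsave() on the same lock
 *		firq_spin_lock_irqsave(&my_firq_lock, flags);
 *		// ... critical section ...
 *		firq_spin_unlock_irqrestore(&my_firq_lock, flags);
 *	}
 */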
/**
 * Special lock: may also be called from FASTIRQ context
 */
#define firq_spin_trylock(lock) \
	({ \
		int ret; \
		if (firq_is_avm_rte()) { \
			ret = arch_spin_trylock(&((lock)->rlock.raw_lock)); \
		} else { \
			ret = spin_trylock(lock); \
		} \
		ret ? 1 : 0; \
	})

/**
 * Special lock: may also be called from FASTIRQ context
 * - restores IRQ and FIRQ
 */
static inline void firq_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->rlock.raw_lock);
	} else {
		spin_unlock(lock);
	}
	avm_arch_local_fiq_restore(flags);
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_raw_spin_lock(raw_spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&lock->raw_lock);
		return;
	}
	raw_spin_lock(lock);
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_raw_spin_unlock(raw_spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->raw_lock);
		return;
	}
	raw_spin_unlock(lock);
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_raw_spin_lock_bh(raw_spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&lock->raw_lock);
		return;
	}
	raw_spin_lock_bh(lock);
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->raw_lock);
		return;
	}
	raw_spin_unlock_bh(lock);
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
#define firq_raw_spin_lock_irqsave(lock, flags) \
	do { \
		flags = avm_arch_local_fiq_and_iq_save(); \
		if (firq_is_avm_rte()) { \
			__arch_spin_lock(&((lock)->raw_lock)); \
		} else { \
			raw_spin_lock(lock); \
		} \
	} while (0)

/**
 * Special lock: may also be called from FASTIRQ context
 * - protects against IRQ and FIRQ
 */
#define firq_raw_spin_trylock_irqsave(lock, flags) \
	({ \
		int ret; \
		flags = avm_arch_local_fiq_and_iq_save(); \
		if (firq_is_avm_rte()) { \
			ret = arch_spin_trylock(&((lock)->raw_lock)); \
		} else { \
			ret = raw_spin_trylock(lock); \
		} \
		if (ret == 0) \
			avm_arch_local_fiq_restore(flags); \
		ret ? 1 : 0; \
	})
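/*
 * Usage sketch (illustrative only): firq_raw_spin_trylock_irqsave() restores
 * the saved IRQ/FIRQ state itself when the lock could not be taken, so the
 * caller restores only on success. The lock name my_raw_lock is hypothetical.
 *
 *	unsigned long flags;
 *
 *	if (firq_raw_spin_trylock_irqsave(&my_raw_lock, flags)) {
 *		// ... critical section ...
 *		firq_raw_spin_unlock_irqrestore(&my_raw_lock, flags);
 *	} else {
 *		// lock not taken: IRQ/FIRQ state has already been restored,
 *		// do not call firq_raw_spin_unlock_irqrestore() here
 *	}
 */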
/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_raw_spin_lock_irq(raw_spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&lock->raw_lock);
	} else {
		avm_arch_local_fiq_and_iq_disable();
		raw_spin_lock(lock);
	}
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->raw_lock);
	} else {
		raw_spin_unlock(lock);
		avm_arch_local_fiq_and_iq_enable();
	}
}

/**
 * Special lock: may also be called from FASTIRQ context
 */
static inline void firq_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->raw_lock);
	} else {
		raw_spin_unlock(lock);
	}
	avm_arch_local_fiq_restore(flags);
}

/**
 */
#define firq_local_irq_save(flags) \
	do { \
		flags = avm_arch_local_fiq_and_iq_save(); \
	} while (0)

#define firq_local_irq_restore(flags) avm_arch_local_fiq_restore(flags)

/**
 * may also be called from FASTIRQ context
 */
static inline int firq_down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	firq_raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	firq_raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}

/**
 * no FASTIRQ protection necessary: firq_up() triggers firq_ipi and therefore
 * also runs in Linux context
 */
static inline void firq_down(struct semaphore *sem)
{
	BUG_ON(is_cpu_mode_fiq());
	down(sem);
}

/**
 */
static inline u64 firq_local_clock(void)
{
	unsigned long flags;
	u64 clock;

	firq_local_irq_save(flags);
	clock = sched_clock_cpu(raw_smp_processor_id());
	firq_local_irq_restore(flags);

	return clock;
}

/**
 */
static inline void firq_start_critical_timings(void)
{
	if (!firq_is_avm_rte())
		start_critical_timings();
}

/**
 */
static inline void firq_stop_critical_timings(void)
{
	if (!firq_is_avm_rte())
		stop_critical_timings();
}

/**
 * trigger certain Linux functionality from FASTIRQ context
 */
struct _firq_to_linux_ipi {
	enum _firq_to_linux_ipi_func_type {
		wake_up_type = 0,
		schedule_work_type,
		schedule_delayed_work_type,
		queue_work_on_type,
		tasklet_hi_schedule_type,
		try_module_get_type,
		module_put_type,
		panic_type,
		call_type,
		wake_up_state_type,
		max_ipi_type
	} ipi_func_type;
	union _firq_to_linux_ipi_params {
		struct _firq_wake_up_param {
			wait_queue_head_t *q;
			unsigned int mode;
			int nr_exclusive;
			void *key;
		} wake_up_param;
		struct _firq_schedule_work_param {
			struct work_struct *work;
		} schedule_work_param;
		struct _firq_schedule_delayed_work_param {
			struct delayed_work *dwork;
			unsigned long delay;
		} schedule_delayed_work_param;
		struct _firq_queue_work_on_param {
			int cpu;
			struct workqueue_struct *wq;
			struct work_struct *work;
		} queue_work_on_param;
		struct _firq_tasklet_hi_schedule_param {
			struct tasklet_struct *t;
		} tasklet_hi_schedule_param;
		struct _firq_module_param {
			struct module *module;
		} module_param;
		struct _panic_param {
			const char *debugstr;
		} panic_param;
		struct _call_param {
			void (*func)(void *func_param);
			void *func_param;
		} call_param;
		struct _firq_wake_up_state_param {
			struct task_struct *tsk;
			unsigned int state;
		} wake_up_state_param;
	} u;
	unsigned long cycle;
	unsigned long ts_jiffies;
	unsigned long ret_ip;
};

/**
 */
extern int firq_trigger_linux_ipi(int cpu, struct _firq_to_linux_ipi *obj);

/*--- #define TEST_FASTIRQ_IPI ---*/
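/*
 * Mechanism sketch (illustrative only): work that must not run in FASTIRQ
 * context is described by a struct _firq_to_linux_ipi and handed to
 * firq_trigger_linux_ipi(), which queues the request per CPU and has it
 * executed later from normal Linux context. The wrappers below
 * (firq_wake_up(), firq_schedule_work(), ...) follow this pattern;
 * my_callback() and my_data are hypothetical names.
 *
 *	static void my_callback(void *my_data)
 *	{
 *		// runs in Linux context, not in the FASTIRQ
 *	}
 *
 *	// from FASTIRQ context:
 *	struct _firq_to_linux_ipi req;
 *
 *	req.ipi_func_type           = call_type;
 *	req.u.call_param.func       = my_callback;
 *	req.u.call_param.func_param = my_data;
 *	firq_trigger_linux_ipi(raw_smp_processor_id(), &req);
 */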
/**
 * usable from any context
 * raw_smp_processor_id() also works in the FASTIRQ thread (gp in thread_info
 * with initialized cpu)
 */
static inline void __firq_wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key)
{
	struct _firq_to_linux_ipi params;
#if !defined(TEST_FASTIRQ_IPI)
	if (!firq_is_avm_rte()) {
		wake_up_interruptible(q);
		return;
	}
#else
	unsigned long flags;

	firq_local_irq_save(flags);
	pr_err("[%s](%p)\n", __func__, q);
#endif
	params.ipi_func_type = wake_up_type;
	params.u.wake_up_param.q = q;
	params.u.wake_up_param.mode = mode;
	params.u.wake_up_param.nr_exclusive = nr_exclusive;
	params.u.wake_up_param.key = key;
	firq_trigger_linux_ipi(raw_smp_processor_id(), &params);
#if defined(TEST_FASTIRQ_IPI)
	firq_local_irq_restore(flags);
#endif /*--- #if defined(TEST_FASTIRQ_IPI) ---*/
}

#define firq_wake_up(x)                      __firq_wake_up(x, TASK_NORMAL, 1, NULL)
#define firq_wake_up_nr(x, nr)               __firq_wake_up(x, TASK_NORMAL, nr, NULL)
#define firq_wake_up_all(x)                  __firq_wake_up(x, TASK_NORMAL, 0, NULL)
#define firq_wake_up_interruptible(x)        __firq_wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define firq_wake_up_interruptible_nr(x, nr) __firq_wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define firq_wake_up_interruptible_all(x)    __firq_wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)

/**
 * usable from any context
 * raw_smp_processor_id() also works from FASTIRQ
 */
static inline bool firq_schedule_work(struct work_struct *work)
{
	struct _firq_to_linux_ipi params;

	if (!firq_is_avm_rte()) {
		return schedule_work(work);
	}
	params.ipi_func_type = schedule_work_type;
	params.u.schedule_work_param.work = work;
	firq_trigger_linux_ipi(raw_smp_processor_id(), &params);

	return true;
}

/**
 * usable from any context
 */
static inline bool firq_queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	struct _firq_to_linux_ipi params;

	if (!firq_is_avm_rte()) {
		return queue_work_on(cpu, wq, work);
	}
	params.ipi_func_type = queue_work_on_type;
	params.u.queue_work_on_param.cpu = cpu;
	params.u.queue_work_on_param.wq = wq;
	params.u.queue_work_on_param.work = work;
	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		cpu = raw_smp_processor_id();
	firq_trigger_linux_ipi(cpu, &params);

	return true;
}

/**
 * usable from any context
 * raw_smp_processor_id() also works in FASTIRQ context (gp in thread_info
 * with initialized cpu)
 */
static inline bool firq_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	struct _firq_to_linux_ipi params;

	if (!firq_is_avm_rte()) {
		return schedule_delayed_work(dwork, delay);
	}
	params.ipi_func_type = schedule_delayed_work_type;
	params.u.schedule_delayed_work_param.dwork = dwork;
	params.u.schedule_delayed_work_param.delay = delay;
	firq_trigger_linux_ipi(raw_smp_processor_id(), &params);

	return true;
}

/**
 * usable from any context
 */
static inline bool firq_try_module_get(struct module *module)
{
	struct _firq_to_linux_ipi params;

	if (!firq_is_avm_rte()) {
		return try_module_get(module);
	}
	params.ipi_func_type = try_module_get_type;
	params.u.module_param.module = module;
	/*--- same CPU as firq_module_put() to prevent races! ---*/
	firq_trigger_linux_ipi(cpumask_first(cpu_online_mask), &params);

	return true;
}

/**
 * usable from any context
 */
static inline void firq_module_put(struct module *module)
{
	struct _firq_to_linux_ipi params;

	if (!firq_is_avm_rte()) {
		module_put(module);
		return;
	}
	params.ipi_func_type = module_put_type;
	params.u.module_param.module = module;
	/*--- same CPU as firq_try_module_get() to prevent races! ---*/
	firq_trigger_linux_ipi(cpumask_first(cpu_online_mask), &params);
}
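/*
 * Usage sketch (illustrative only): a FASTIRQ handler cannot call
 * schedule_work() or wake_up_interruptible() directly, but it can use the
 * firq_* wrappers above, which either call the Linux function immediately
 * (normal context) or defer it via firq_trigger_linux_ipi() (FASTIRQ
 * context). my_work, my_waitq and my_fiq_handler() are hypothetical names.
 *
 *	static DECLARE_WORK(my_work, my_work_func);
 *	static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
 *
 *	static void my_fiq_handler(void)
 *	{
 *		// ... collect data in FASTIRQ context ...
 *		firq_schedule_work(&my_work);          // defer heavy processing
 *		firq_wake_up_interruptible(&my_waitq); // wake a sleeping reader
 *	}
 */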
/**
 * usable from any context
 * raw_smp_processor_id() also works in the FASTIRQ thread (gp in thread_info
 * with initialized cpu)
 */
static inline void firq_tasklet_hi_schedule(struct tasklet_struct *t)
{
	struct _firq_to_linux_ipi params;

	if (!firq_is_avm_rte()) {
		tasklet_hi_schedule(t);
		return;
	}
	if ((t->state & (1 << TASKLET_STATE_SCHED)) == 0) {
		params.ipi_func_type = tasklet_hi_schedule_type;
		params.u.tasklet_hi_schedule_param.t = t;
		firq_trigger_linux_ipi(raw_smp_processor_id(), &params);
	}
}

/**
 * usable from any context
 * raw_smp_processor_id() also works in the FASTIRQ thread (gp in thread_info
 * with initialized cpu)
 */
static inline void firq_panic(const char *debugstr)
{
	struct _firq_to_linux_ipi params;
	int cpu;

	if (!firq_is_avm_rte()) {
		panic("%s\n", debugstr);
		return;
	}
	params.ipi_func_type = panic_type;
	params.u.panic_param.debugstr = debugstr;
	for_each_online_cpu(cpu) {
		firq_trigger_linux_ipi(cpu, &params); /*--- trigger other cpus and hold on irq ---*/
	}
	for (;;)
		;
}

/**
 * also usable from Linux context
 */
static inline void firq_call(int cpu, void (*call_func)(void *func_param), void *func_param)
{
	struct _firq_to_linux_ipi params;

	params.ipi_func_type = call_type;
	params.u.call_param.func = call_func;
	params.u.call_param.func_param = func_param;
	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		cpu = raw_smp_processor_id();
	firq_trigger_linux_ipi(cpu, &params);
}

/**
 */
static inline int firq_wake_up_state(struct task_struct *p, unsigned int state)
{
	struct _firq_to_linux_ipi params;

	if (!firq_is_avm_rte()) {
		return wake_up_state(p, state);
	}
	params.ipi_func_type = wake_up_state_type;
	params.u.wake_up_state_param.tsk = p;
	params.u.wake_up_state_param.state = state;
	get_task_struct(p);
	if (firq_trigger_linux_ipi(raw_smp_processor_id(), &params)) {
		put_task_struct(p);
		return false;
	}

	return true;
}

/**
 */
static inline int firq_wake_up_process(struct task_struct *p)
{
	return firq_wake_up_state(p, TASK_NORMAL);
}

/**
 */
static inline void firq_up(struct semaphore *sem)
{
	unsigned long flags;
	struct task_struct *waiter = NULL;

	firq_raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		waiter = __rte_sem_partial_wake(sem);
	firq_raw_spin_unlock_irqrestore(&sem->lock, flags);

	if (waiter)
		firq_wake_up_process(waiter);
}

/**
 * Very important!
 * If IPI functions have been used in FASTIRQ context, call this to flush/sync
 * the IPI queues before freeing any Linux-dependent data structures
 * (e.g. a work item), to prevent use-after-free accesses.
 * Only callable from Linux kthread context.
 * timeout: in jiffies
 *
 * ret: 1 - all (per-cpu) queues synced
 *      0 - timeout
 */
int firq_to_linux_sync_ipi(int timeout);

#endif // #ifndef _AVM_FIQ_OS_H_
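/*
 * Teardown sketch (illustrative only): before freeing an object that was
 * handed to the firq_* wrappers from FASTIRQ context (e.g. a work_struct),
 * drain the per-cpu IPI queues with firq_to_linux_sync_ipi() so that no
 * still-queued request can touch freed memory. my_dev, its members and
 * disable_my_fastirq() are hypothetical.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		disable_my_fastirq(dev);               // no new FASTIRQ requests
 *		if (!firq_to_linux_sync_ipi(HZ))       // wait up to 1 s (timeout in jiffies)
 *			pr_warn("firq ipi queues not drained\n");
 *		cancel_work_sync(&dev->work);          // work may have been queued meanwhile
 *		kfree(dev);
 *	}
 */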