/*------------------------------------------------------------------------------------------*\
 *
 * Copyright (C) 2006 AVM GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
\*------------------------------------------------------------------------------------------*/
#ifndef _AVM_FIQ_OS_H_
#define _AVM_FIQ_OS_H_

#if defined(CONFIG_AVM_FIQ_PUMA6) || defined(CONFIG_AVM_FIQ_PUMA7)

#include <linux/spinlock.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/module.h>

#define firq_is_avm_rte()		is_cpu_mode_fiq()
#define __arch_spin_lock(lock)		arch_spin_lock(lock)
#define firq_spin_lock_init(_lock)	spin_lock_init(_lock)

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_spin_lock(spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&lock->rlock.raw_lock);
		return;
	}
	spin_lock(lock);
}

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_spin_unlock(spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->rlock.raw_lock);
		return;
	}
	spin_unlock(lock);
}

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_spin_lock_bh(spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&lock->rlock.raw_lock);
		return;
	}
	spin_lock_bh(lock);
}

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_spin_unlock_bh(spinlock_t *lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->rlock.raw_lock);
		return;
	}
	spin_unlock_bh(lock);
}

/*--------------------------------------------------------------------------------*\
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 * - protects against IRQ and FIRQ
\*--------------------------------------------------------------------------------*/
#define firq_spin_lock_irqsave(lock, flags) \
	do { \
		flags = avm_arch_local_fiq_and_iq_save(); \
		if (firq_is_avm_rte()) { \
			__arch_spin_lock(&((lock)->rlock.raw_lock)); \
		} else { \
			spin_lock(lock); \
		} \
	} while (0)

/*--------------------------------------------------------------------------------*\
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 * - restores IRQ and FIRQ
\*--------------------------------------------------------------------------------*/
static inline void firq_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&lock->rlock.raw_lock);
	} else {
		spin_unlock(lock);
	}
	avm_arch_local_fiq_restore(flags);
}

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 * - protects against IRQ and FIRQ
 * - returns nonzero if the lock was acquired, 0 otherwise (flags are restored on failure)
 */
#define firq_spin_trylock_irqsave(lock, flags) \
	({ \
		int ret; \
		flags = avm_arch_local_fiq_and_iq_save(); \
		if (firq_is_avm_rte()) { \
			ret = arch_spin_trylock(&((lock)->rlock.raw_lock)); \
		} else { \
			ret = spin_trylock(lock); \
		} \
		if (ret == 0) \
			avm_arch_local_fiq_restore(flags); \
		ret ? 1 : 0; \
	})
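
/*
 * Usage sketch (illustrative only, not part of the original header): a driver
 * sharing state between its FASTIRQ handler and normal Linux context could
 * guard that state with the wrappers above. The identifiers my_lock,
 * my_event_count and my_count_event() are hypothetical.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static unsigned int my_event_count;
 *
 *	static void my_count_event(void)
 *	{
 *		unsigned long flags;
 *
 *		firq_spin_lock_irqsave(&my_lock, flags);
 *		my_event_count++;
 *		firq_spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */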

/**
 * Disable both IRQ and FIQ on the local CPU; the previous state is returned in flags.
 */
#define firq_local_irq_save(flags) \
	do { \
		flags = avm_arch_local_fiq_and_iq_save(); \
	} while (0)

#define firq_local_irq_restore(flags)	avm_arch_local_fiq_restore(flags)

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_raw_spin_lock(raw_spinlock_t *raw_lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&raw_lock->raw_lock);
		return;
	}
	raw_spin_lock(raw_lock);
}

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_raw_spin_unlock(raw_spinlock_t *raw_lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&raw_lock->raw_lock);
		return;
	}
	raw_spin_unlock(raw_lock);
}

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_raw_spin_lock_irq(raw_spinlock_t *raw_lock)
{
	if (firq_is_avm_rte()) {
		__arch_spin_lock(&raw_lock->raw_lock);
	} else {
		avm_arch_local_fiq_and_iq_save();
		raw_spin_lock(raw_lock);
	}
}

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_raw_spin_unlock_irq(raw_spinlock_t *raw_lock)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&raw_lock->raw_lock);
	} else {
		raw_spin_unlock(raw_lock);
		avm_arch_local_fiq_and_iq_enable();
	}
}

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
#define firq_raw_spin_lock_irqsave(raw_lock, flags) \
	do { \
		flags = avm_arch_local_fiq_and_iq_save(); \
		if (firq_is_avm_rte()) { \
			__arch_spin_lock(&(raw_lock)->raw_lock); \
		} else { \
			raw_spin_lock(raw_lock); \
		} \
	} while (0)

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 * - protects against IRQ and FIRQ
 * - returns nonzero if the lock was acquired, 0 otherwise (flags are restored on failure)
 */
#define firq_raw_spin_trylock_irqsave(raw_lock, flags) \
	({ \
		int ret; \
		flags = avm_arch_local_fiq_and_iq_save(); \
		if (firq_is_avm_rte()) { \
			ret = arch_spin_trylock(&(raw_lock)->raw_lock); \
		} else { \
			ret = raw_spin_trylock(raw_lock); \
		} \
		if (ret == 0) \
			avm_arch_local_fiq_restore(flags); \
		ret ? 1 : 0; \
	})

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 * - returns nonzero if the lock was acquired, 0 otherwise
 */
#define firq_spin_trylock(lock) \
	({ \
		int ret; \
		if (firq_is_avm_rte()) { \
			ret = arch_spin_trylock(&((lock)->rlock.raw_lock)); \
		} else { \
			ret = spin_trylock(lock); \
		} \
		ret ? 1 : 0; \
	})
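
/*
 * Usage sketch (illustrative only): the trylock variant lets a caller back off
 * instead of spinning, and it restores the IRQ/FIQ state itself when the lock
 * could not be taken. The identifiers my_fifo_lock, my_last_value and
 * my_fifo_put() are hypothetical.
 *
 *	static DEFINE_SPINLOCK(my_fifo_lock);
 *	static unsigned int my_last_value;
 *
 *	static int my_fifo_put(unsigned int value)
 *	{
 *		unsigned long flags;
 *
 *		if (!firq_spin_trylock_irqsave(&my_fifo_lock, flags))
 *			return -EBUSY;	// lock contended, flags already restored
 *
 *		my_last_value = value;
 *		firq_spin_unlock_irqrestore(&my_fifo_lock, flags);
 *		return 0;
 *	}
 */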

/**
 * Special lock: can also be called from FASTIRQ (FIQ) context.
 */
static inline void firq_raw_spin_unlock_irqrestore(raw_spinlock_t *raw_lock, unsigned long flags)
{
	if (firq_is_avm_rte()) {
		arch_spin_unlock(&raw_lock->raw_lock);
	} else {
		raw_spin_unlock(raw_lock);
	}
	avm_arch_local_fiq_restore(flags);
}

/**
 * Can also be called from FASTIRQ (FIQ) context.
 */
static inline int firq_down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	firq_raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	firq_raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}

/**
 * No FASTIRQ protection necessary, since firq_up() triggers firq_ipi and thus
 * also runs in Linux context.
 */
static inline void firq_down(struct semaphore *sem)
{
	BUG_ON(firq_is_avm_rte());
	down(sem);
}

static inline void firq_up(struct semaphore *sem)
{
	BUG_ON(firq_is_avm_rte());
	up(sem);
}

static inline u64 firq_local_clock(void)
{
	unsigned long flags;
	u64 clock;

	firq_local_irq_save(flags);
	clock = sched_clock_cpu(raw_smp_processor_id());
	firq_local_irq_restore(flags);

	return clock;
}

static inline void firq_start_critical_timings(void)
{
	if (!firq_is_avm_rte())
		start_critical_timings();
}

static inline void firq_stop_critical_timings(void)
{
	if (!firq_is_avm_rte())
		stop_critical_timings();
}

static inline bool firq_schedule_work(struct work_struct *work)
{
	BUG_ON(firq_is_avm_rte());
	return schedule_work(work);
}

/**
 * Must not be called from FASTIRQ context (see BUG_ON).
 * raw_smp_processor_id() also works in the FASTIRQ thread (gp in thread_info
 * with an initialised cpu). Note: mode, nr_exclusive and key are currently
 * ignored; the queue is always woken with wake_up_interruptible().
 */
static inline void __firq_wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key)
{
	BUG_ON(firq_is_avm_rte());
	wake_up_interruptible(q);
}

#define firq_wake_up(x)				__firq_wake_up(x, TASK_NORMAL, 1, NULL)
#define firq_wake_up_nr(x, nr)			__firq_wake_up(x, TASK_NORMAL, nr, NULL)
#define firq_wake_up_all(x)			__firq_wake_up(x, TASK_NORMAL, 0, NULL)
#define firq_wake_up_interruptible(x)		__firq_wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define firq_wake_up_interruptible_nr(x, nr)	__firq_wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define firq_wake_up_interruptible_all(x)	__firq_wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)

/**
 * Must not be called from FASTIRQ context (see BUG_ON).
 */
static inline bool firq_queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	BUG_ON(firq_is_avm_rte());
	return queue_work_on(cpu, wq, work);
}

/**
 * Must not be called from FASTIRQ context (see BUG_ON).
 */
static inline bool firq_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	BUG_ON(firq_is_avm_rte());
	return schedule_delayed_work(dwork, delay);
}

/**
 * Must not be called from FASTIRQ context (see BUG_ON).
 */
static inline bool firq_try_module_get(struct module *module)
{
	BUG_ON(firq_is_avm_rte());
	return try_module_get(module);
}

/**
 * Must not be called from FASTIRQ context (see BUG_ON).
 */
static inline void firq_module_put(struct module *module)
{
	BUG_ON(firq_is_avm_rte());
	module_put(module);
}

/**
 * Must not be called from FASTIRQ context (see BUG_ON).
 */
static inline void firq_tasklet_hi_schedule(struct tasklet_struct *t)
{
	BUG_ON(firq_is_avm_rte());
	tasklet_hi_schedule(t);
}

static inline void firq_panic(const char *debugstr)
{
	panic("%s\n", debugstr);
}

/**
 * Stub: always returns 1; the timeout parameter is ignored.
 */
static inline int firq_to_linux_sync_ipi(int timeout)
{
	return 1;
}

#define __BUILD_AVM_CONTEXT_FUNC(func)	firq_##func

#else

#define __BUILD_AVM_CONTEXT_FUNC(func)	func

#endif /* CONFIG_AVM_FIQ_PUMA6 || CONFIG_AVM_FIQ_PUMA7 */

#endif /* _AVM_FIQ_OS_H_ */
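
/*
 * Usage sketch for __BUILD_AVM_CONTEXT_FUNC (illustrative only): code that has
 * to build both with and without AVM FIQ support can select the matching
 * wrapper at preprocessing time. With CONFIG_AVM_FIQ_PUMA6/PUMA7 enabled the
 * macro prefixes the name with "firq_", otherwise the plain kernel primitive
 * is used. The identifiers my_lock, my_work and my_example() are hypothetical.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static struct work_struct my_work;
 *
 *	static void my_example(void)
 *	{
 *		__BUILD_AVM_CONTEXT_FUNC(spin_lock)(&my_lock);
 *		// ... critical section ...
 *		__BUILD_AVM_CONTEXT_FUNC(spin_unlock)(&my_lock);
 *
 *		__BUILD_AVM_CONTEXT_FUNC(schedule_work)(&my_work);
 *	}
 */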