--- zzzz-none-000/linux-3.10.107/include/linux/spinlock.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/linux/spinlock.h	2021-02-04 17:41:59.000000000 +0000
@@ -56,6 +56,12 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <asm/barrier.h>
+#if defined(CONFIG_AVM_FASTIRQ_DEBUG)
+#include
+#define AVM_CHECK_LINUX_CONTEXT() do { if (is_cpu_mode_fiq()) { printk_linux(KERN_ERR"ERROR: fiq-context 0x%08lx 0x%08lx\n", _THIS_IP_, _RET_IP_);} } while(0)
+#else/*--- #if defined(CONFIG_AVM_FASTIRQ_DEBUG) ---*/
+#define AVM_CHECK_LINUX_CONTEXT()
+#endif/*--- #if defined(CONFIG_AVM_FASTIRQ_DEBUG) ---*/
 
 /*
  * Must define these before including other files, inline functions need them
@@ -120,7 +126,7 @@
 /*
  * Despite its name it doesn't necessarily has to be a full barrier.
  * It should only guarantee that a STORE before the critical section
- * can not be reordered with a LOAD inside this section.
+ * can not be reordered with LOADs and STOREs inside this section.
  * spin_lock() is the one-way barrier, this LOAD can not escape out
  * of the region. So the default implementation simply ensures that
  * a STORE can not move into the critical section, smp_wmb() should
@@ -179,7 +185,15 @@
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define raw_spin_lock_nested(lock, subclass) \
-	_raw_spin_lock_nested(lock, subclass)
+	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
+		_raw_spin_lock_nested(lock, subclass); \
+	} while (0)
+# define raw_spin_lock_bh_nested(lock, subclass) \
+	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
+		_raw_spin_lock_bh_nested(lock, subclass); \
+	} while (0)
 
 # define raw_spin_lock_nest_lock(lock, nest_lock) \
 	 do { \
@@ -187,14 +201,22 @@
 		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
 	 } while (0)
 #else
-# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+/*
+ * Always evaluate the 'subclass' argument to avoid that the compiler
+ * warns about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define raw_spin_lock_nested(lock, subclass) \
+	_raw_spin_lock(((void)(subclass), (lock)))
 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
+# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
 #define raw_spin_lock_irqsave(lock, flags) \
 	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
 		typecheck(unsigned long, flags); \
 		flags = _raw_spin_lock_irqsave(lock); \
 	} while (0)
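The _raw_spin_lock(((void)(subclass), (lock))) form introduced above relies on the C comma operator: the left operand evaluates and discards subclass, and the whole expression yields lock, so a variable whose only use is that argument no longer triggers -Wunused-but-set-variable when CONFIG_DEBUG_LOCK_ALLOC=n and the kernel is built with W=1. Below is a minimal stand-alone sketch of the same idiom; lock_plain, lock_checked, lock_nested and DEBUG_LEVELS are invented names for illustration, not kernel identifiers.

#include <stdio.h>

static void lock_plain(int *l)
{
	*l = 1;			/* "take" the lock */
}

#ifdef DEBUG_LEVELS
/* Hypothetical debug variant that really consumes 'level'. */
# define lock_nested(l, level)	lock_checked((l), (level))
#else
/*
 * Comma-operator idiom from the hunk above: (void)(level) evaluates
 * and discards the argument, then the expression yields 'l'.
 */
# define lock_nested(l, level)	lock_plain(((void)(level), (l)))
#endif

int main(void)
{
	int lock = 0;
	int depth = 2;		/* set, and used only as the 'level' argument */

	lock_nested(&lock, depth);	/* no set-but-not-used warning for 'depth' */
	printf("locked: %d\n", lock);
	return 0;
}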
@@ -202,12 +224,14 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
 	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
 		typecheck(unsigned long, flags); \
 		flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
 	} while (0)
 #else
 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
 	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
 		typecheck(unsigned long, flags); \
 		flags = _raw_spin_lock_irqsave(lock); \
 	} while (0)
@@ -243,6 +267,7 @@
 
 #define raw_spin_trylock_irq(lock) \
 ({ \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	local_irq_disable(); \
 	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_enable(); 0; }); \
@@ -250,6 +275,7 @@
 
 #define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	local_irq_save(flags); \
 	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_restore(flags); 0; }); \
@@ -277,7 +303,7 @@
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
 
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 {
 	return &lock->rlock;
 }
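This hunk, and the large one that follows, also convert the one-line spin_* wrappers from inline to __always_inline. Plain inline is only a hint; __always_inline overrides the compiler's inlining heuristics (for example at -O0 or with conservative inlining settings), so the wrappers reliably collapse into the raw call with no extra stack frame. A rough stand-alone sketch of the effect, assuming GCC/Clang attribute syntax; the my_* names are invented for the example.

#include <stdio.h>

/* Rough equivalent of the kernel's __always_inline. */
#define my_always_inline inline __attribute__((__always_inline__))

typedef struct { volatile int locked; } my_raw_spinlock_t;
typedef struct { my_raw_spinlock_t rlock; } my_spinlock_t;

/* Mirrors spinlock_check(): once inlined this is pure address
 * arithmetic, which is why forcing the inline matters. */
static my_always_inline my_raw_spinlock_t *my_spinlock_check(my_spinlock_t *lock)
{
	return &lock->rlock;
}

int main(void)
{
	my_spinlock_t s = { { 0 } };

	printf("rlock at %p\n", (void *)my_spinlock_check(&s));
	return 0;
}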
@@ -288,97 +314,113 @@
 	raw_spin_lock_init(&(_lock)->rlock); \
 } while (0)
 
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	raw_spin_lock(&lock->rlock);
 }
 
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	raw_spin_lock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	return raw_spin_trylock(&lock->rlock);
 }
 
 #define spin_lock_nested(lock, subclass) \
 do { \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_lock_nested(spinlock_check(lock), subclass); \
 } while (0)
 
+#define spin_lock_bh_nested(lock, subclass) \
+do { \
+	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
+} while (0)
+
 #define spin_lock_nest_lock(lock, nest_lock) \
 do { \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
 } while (0)
 
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
	raw_spin_lock_irq(&lock->rlock);
 }
 
 #define spin_lock_irqsave(lock, flags) \
 do { \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_lock_irqsave(spinlock_check(lock), flags); \
 } while (0)
 
 #define spin_lock_irqsave_nested(lock, flags, subclass) \
 do { \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)
 
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
 {
 	raw_spin_unlock(&lock->rlock);
 }
 
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
 	raw_spin_unlock_bh(&lock->rlock);
 }
 
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
 	raw_spin_unlock_irq(&lock->rlock);
 }
 
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
 }
 
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	return raw_spin_trylock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	return raw_spin_trylock_irq(&lock->rlock);
 }
 
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
 }
 
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
 }
 
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
 {
 	return raw_spin_is_contended(&lock->rlock);
 }
 
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
 {
 	return raw_spin_can_lock(&lock->rlock);
 }
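Taken as a whole, the patch routes every lock-acquiring entry point (lock, trylock, irqsave and the nested variants) through AVM_CHECK_LINUX_CONTEXT(), which under CONFIG_AVM_FASTIRQ_DEBUG reports the instruction and return addresses whenever a Linux spinlock is about to be taken from FIQ context; the unlock paths are left unchecked. A compilable plain-C sketch of that pattern follows; is_cpu_mode_fiq() is stubbed, printf() stands in for the AVM-specific printk_linux(), and _THIS_IP_/_RET_IP_ follow their kernel definitions from include/linux/kernel.h.

#include <stdio.h>

/* Caller-address helpers, as defined in include/linux/kernel.h
 * (GCC extensions: local labels and __builtin_return_address). */
#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
#define _RET_IP_   ((unsigned long)__builtin_return_address(0))

/* Stub standing in for the AVM helper: pretend we run in FIQ mode. */
static int is_cpu_mode_fiq(void)
{
	return 1;
}

/* Same shape as the macro from the first hunk; printf() replaces the
 * AVM-specific printk_linux(). */
#define AVM_CHECK_LINUX_CONTEXT() do { \
	if (is_cpu_mode_fiq()) \
		printf("ERROR: fiq-context 0x%08lx 0x%08lx\n", \
		       _THIS_IP_, _RET_IP_); \
} while (0)

static void spin_lock_stub(void)
{
	AVM_CHECK_LINUX_CONTEXT();	/* flags the illegal acquisition */
	/* ... the raw lock would be taken here ... */
}

int main(void)
{
	spin_lock_stub();		/* prints the two caller addresses */
	return 0;
}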