--- zzzz-none-000/linux-4.1.38/include/linux/spinlock.h	2017-01-18 18:48:06.000000000 +0000
+++ bcm63-7582-715/linux-4.1.38/include/linux/spinlock.h	2020-11-25 10:06:48.000000000 +0000
@@ -56,6 +56,12 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <asm/barrier.h>
+#if defined(CONFIG_AVM_FASTIRQ_DEBUG)
+#include
+#define AVM_CHECK_LINUX_CONTEXT() do { if (is_cpu_mode_fiq()) { printk_linux(KERN_ERR"ERROR: fiq-context 0x%08lx 0x%08lx\n", _THIS_IP_, _RET_IP_);} } while(0)
+#else/*--- #if defined(CONFIG_AVM_FASTIRQ_DEBUG) ---*/
+#define AVM_CHECK_LINUX_CONTEXT()
+#endif/*--- #if defined(CONFIG_AVM_FASTIRQ_DEBUG) ---*/
 
 
 /*
@@ -120,7 +126,7 @@
 /*
  * Despite its name it doesn't necessarily has to be a full barrier.
  * It should only guarantee that a STORE before the critical section
- * can not be reordered with a LOAD inside this section.
+ * can not be reordered with LOADs and STOREs inside this section.
  * spin_lock() is the one-way barrier, this LOAD can not escape out
 * of the region. So the default implementation simply ensures that
  * a STORE can not move into the critical section, smp_wmb() should
@@ -159,7 +165,7 @@
 }
 
 static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags __attribute__((unused))) __acquires(lock)
 {
 	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
@@ -189,9 +195,15 @@
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define raw_spin_lock_nested(lock, subclass) \
-	_raw_spin_lock_nested(lock, subclass)
+	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
+		_raw_spin_lock_nested(lock, subclass); \
+	} while (0)
 # define raw_spin_lock_bh_nested(lock, subclass) \
-	_raw_spin_lock_bh_nested(lock, subclass)
+	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
+		_raw_spin_lock_bh_nested(lock, subclass); \
+	} while (0)
 
 # define raw_spin_lock_nest_lock(lock, nest_lock) \
 	 do { \
@@ -214,6 +226,7 @@
 
 #define raw_spin_lock_irqsave(lock, flags) \
 	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
 		typecheck(unsigned long, flags); \
 		flags = _raw_spin_lock_irqsave(lock); \
 	} while (0)
@@ -221,12 +234,14 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
 	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
 		typecheck(unsigned long, flags); \
 		flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
 	} while (0)
 #else
 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
 	do { \
+		AVM_CHECK_LINUX_CONTEXT(); \
 		typecheck(unsigned long, flags); \
 		flags = _raw_spin_lock_irqsave(lock); \
 	} while (0)
@@ -262,6 +277,7 @@
 
 #define raw_spin_trylock_irq(lock) \
 ({ \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	local_irq_disable(); \
 	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_enable(); 0; }); \
@@ -269,6 +285,7 @@
 
 #define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	local_irq_save(flags); \
 	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_restore(flags); 0; }); \
@@ -309,21 +326,25 @@
 
 static inline void spin_lock(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	raw_spin_lock(&lock->rlock);
 }
 
 static inline void spin_lock_bh(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	raw_spin_lock_bh(&lock->rlock);
 }
 
 static inline int spin_trylock(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	return raw_spin_trylock(&lock->rlock);
 }
 
 #define spin_lock_nested(lock, subclass) \
 do { \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_lock_nested(spinlock_check(lock), subclass); \
 } while (0)
 
@@ -334,21 +355,25 @@
 
 #define spin_lock_nest_lock(lock, nest_lock) \
 do { \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
 } while (0)
 
 static inline void spin_lock_irq(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	raw_spin_lock_irq(&lock->rlock);
 }
 
 #define spin_lock_irqsave(lock, flags) \
 do { \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_lock_irqsave(spinlock_check(lock), flags); \
 } while (0)
 
 #define spin_lock_irqsave_nested(lock, flags, subclass) \
 do { \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)
 
@@ -374,16 +399,19 @@
 
 static inline int spin_trylock_bh(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
	return raw_spin_trylock_bh(&lock->rlock);
 }
 
 static inline int spin_trylock_irq(spinlock_t *lock)
 {
+	AVM_CHECK_LINUX_CONTEXT();
 	return raw_spin_trylock_irq(&lock->rlock);
 }
 
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
+	AVM_CHECK_LINUX_CONTEXT(); \
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
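
For readers without the AVM tree at hand, the pattern this patch threads through every spinlock entry point reduces to a short, self-contained C sketch. In the kernel, is_cpu_mode_fiq() and printk_linux() come from AVM's fastirq support (the header it includes is elided in the first hunk above); fake_fiq_mode and take_lock below are hypothetical userspace stand-ins so the check itself can be exercised.

/* Build and run: cc -o ctx_check_demo ctx_check_demo.c && ./ctx_check_demo */
#include <stdio.h>

/* Hypothetical stand-in for the kernel's is_cpu_mode_fiq(): instead of
 * reading the CPU mode, it reports a flag we flip from main(). */
static int fake_fiq_mode;

static int is_cpu_mode_fiq(void)
{
	return fake_fiq_mode;
}

/* Mirrors the CONFIG_AVM_FASTIRQ_DEBUG flavour of the macro: complain when
 * a Linux locking primitive is entered from FIQ context. The kernel version
 * prints _THIS_IP_/_RET_IP_ via printk_linux(); stderr stands in here. */
#define AVM_CHECK_LINUX_CONTEXT() \
	do { \
		if (is_cpu_mode_fiq()) \
			fprintf(stderr, "ERROR: fiq-context in %s\n", __func__); \
	} while (0)

/* Hypothetical lock entry point, shaped like the patched spin_lock(). */
static void take_lock(void)
{
	AVM_CHECK_LINUX_CONTEXT();	/* runs before the real lock operation */
	/* raw_spin_lock(&lock->rlock) would follow here */
}

int main(void)
{
	take_lock();		/* normal context: silent */
	fake_fiq_mode = 1;
	take_lock();		/* "FIQ" context: prints the error */
	return 0;
}

The motivation, as far as the patch itself shows: the regular spin_lock()/raw_spin_lock_irqsave() family masks ordinary IRQs at most, never FIQs, so code running in AVM's fast-interrupt context that enters these primitives can deadlock against the lock holder it interrupted. With CONFIG_AVM_FASTIRQ_DEBUG set, each entry point therefore first runs AVM_CHECK_LINUX_CONTEXT() (presumably a cheap CPU-mode test on ARM) and logs the offending call site; without the option, the macro expands to nothing and the locks are unchanged.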