--- zzzz-none-000/linux-4.1.38/include/linux/bit_spinlock.h	2017-01-18 18:48:06.000000000 +0000
+++ bcm63-7582-715/linux-4.1.38/include/linux/bit_spinlock.h	2020-11-25 10:06:48.000000000 +0000
@@ -12,7 +12,7 @@
  * Don't use this unless you really need to: spin_lock() and spin_unlock()
  * are significantly faster.
  */
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+static inline void bit_spin_lock(int bitnum __attribute__((unused)), unsigned long *addr __attribute__((unused)))
 {
 	/*
 	 * Assuming the lock is uncontended, this never enters
@@ -37,7 +37,7 @@
 /*
  * Return true if it was acquired
  */
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+static inline int bit_spin_trylock(int bitnum __attribute__((unused)), unsigned long *addr __attribute__((unused)))
 {
 	preempt_disable();
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -53,7 +53,7 @@
 /*
  * bit-based spin_unlock()
  */
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+static inline void bit_spin_unlock(int bitnum __attribute__((unused)), unsigned long *addr __attribute__((unused)))
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
@@ -70,7 +70,7 @@
  * non-atomic version, which can be used eg. if the bit lock itself is
  * protecting the rest of the flags in the word.
  */
-static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+static inline void __bit_spin_unlock(int bitnum __attribute__((unused)), unsigned long *addr __attribute__((unused)))
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
@@ -85,7 +85,7 @@
 /*
  * Return true if the lock is held.
  */
-static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+static inline int bit_spin_is_locked(int bitnum __attribute__((unused)), unsigned long *addr __attribute__((unused)))
 {
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	return test_bit(bitnum, addr);