--- zzzz-none-000/linux-5.4.213/arch/arm/include/asm/spinlock.h	2022-09-15 10:04:56.000000000 +0000
+++ alder-5690pro-762/linux-5.4.213/arch/arm/include/asm/spinlock.h	2024-08-14 09:01:27.000000000 +0000
@@ -10,6 +10,39 @@
 #include <asm/barrier.h>
 #include <asm/processor.h>
 
+#if defined(CONFIG_ENABLE_SPINLOCK_PROFILING_HOOKS)
+extern int avm_simple_profiling_is_enabled_func(void);
+extern unsigned int avm_get_cycles_func(void);
+
+#define avm_simple_profiling_spin_lock(lock) ({ \
+	if (avm_simple_profiling_is_enabled_func()) { \
+		unsigned long avm_simple_profiling_spinlock_endtime = avm_get_cycles_func(); \
+		__avm_simple_profiling_spinlock(_RET_IP_, (unsigned long)lock, avm_simple_profiling_spinlock_endtime - avm_simple_profiling_spinlock_starttime, 0); \
+	} \
+})
+
+#define avm_simple_profiling_spin_trylock(lock, success) ({ \
+	if (avm_simple_profiling_is_enabled_func()) { \
+		__avm_simple_profiling_spinlock(_RET_IP_, (unsigned long)lock, 0, success ? 1 : 2); \
+	} \
+})
+
+#define avm_simple_profiling_spin_unlock(lock) ({ \
+	if (avm_simple_profiling_is_enabled_func()) { \
+		__avm_simple_profiling_spinlock(_RET_IP_, (unsigned long)lock, 0, 3); \
+	} \
+})
+
+#define avm_simple_profiling_spin_gettime() unsigned int avm_simple_profiling_spinlock_starttime = avm_get_cycles_func()
+
+extern void __avm_simple_profiling_spinlock(unsigned long addr, unsigned long lock, unsigned long time, unsigned int id);
+
+#else
+static inline void avm_simple_profiling_spin_lock(void *lock __maybe_unused) { };
+static inline void avm_simple_profiling_spin_trylock(void *lock __maybe_unused, unsigned int success __maybe_unused) { };
+static inline void avm_simple_profiling_spin_unlock(void *lock __maybe_unused) { };
+static inline unsigned int avm_simple_profiling_spin_gettime(void) { return 0; }
+#endif
 /*
  * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
@@ -59,6 +92,7 @@
 	u32 newval;
 	arch_spinlock_t lockval;
 
+	avm_simple_profiling_spin_gettime();
 	prefetchw(&lock->slock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%3]\n"
@@ -76,6 +110,7 @@
 	}
 
 	smp_mb();
+	avm_simple_profiling_spin_lock(lock);
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -96,6 +131,7 @@
 		: "cc");
 	} while (res);
 
+	avm_simple_profiling_spin_trylock(lock, contended);
 	if (!contended) {
 		smp_mb();
 		return 1;
@@ -107,6 +143,7 @@
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	smp_mb();
+	avm_simple_profiling_spin_unlock(lock);
 	lock->tickets.owner++;
 	dsb_sev();
 }
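
For context, below is a minimal user-space C sketch of what a profiling back end behind these hooks could look like. The hook names and the event ids (0 = lock acquired, with the spin time in cycles; 1 = trylock succeeded; 2 = trylock failed; 3 = unlock) are taken from the patch above; the ring buffer, the fake cycle counter, and the call sequence in main() are illustrative assumptions only and are not the vendor's implementation.

/*
 * Sketch only: models the event ids and cycle-delta logic used by the
 * profiling hooks in the patch.  All storage and helper bodies here are
 * assumptions; only the names and id values come from the patch.
 */
#include <stdio.h>

#define NR_EVENTS 64

struct spinlock_event {
	unsigned long caller;   /* _RET_IP_ in the kernel macros */
	unsigned long lock;     /* address of the arch_spinlock_t */
	unsigned long wait;     /* cycles spent spinning (id 0 only) */
	unsigned int id;        /* 0 lock, 1 trylock ok, 2 trylock fail, 3 unlock */
};

static struct spinlock_event events[NR_EVENTS];
static unsigned int nr_events;
static int profiling_enabled = 1;
static unsigned int fake_cycles;

/* Stand-ins for avm_simple_profiling_is_enabled_func()/avm_get_cycles_func() */
static int avm_simple_profiling_is_enabled_func(void) { return profiling_enabled; }
static unsigned int avm_get_cycles_func(void) { return fake_cycles += 100; }

static void __avm_simple_profiling_spinlock(unsigned long addr, unsigned long lock,
					    unsigned long time, unsigned int id)
{
	/* Record one event per hook invocation, dropping events when full. */
	if (nr_events < NR_EVENTS)
		events[nr_events++] = (struct spinlock_event){ addr, lock, time, id };
}

int main(void)
{
	unsigned long lock = 0xdeadbeef;        /* pretend lock address */
	unsigned long caller = 0xc0101234;      /* pretend _RET_IP_ */

	/* arch_spin_lock(): sample cycles before and after the spin loop (id 0) */
	unsigned int start = avm_get_cycles_func();
	if (avm_simple_profiling_is_enabled_func())
		__avm_simple_profiling_spinlock(caller, lock,
						avm_get_cycles_func() - start, 0);

	/* arch_spin_trylock(): record success (1) or failure (2), no timing */
	if (avm_simple_profiling_is_enabled_func())
		__avm_simple_profiling_spinlock(caller, lock, 0, 1);

	/* arch_spin_unlock(): id 3, no timing */
	if (avm_simple_profiling_is_enabled_func())
		__avm_simple_profiling_spinlock(caller, lock, 0, 3);

	for (unsigned int i = 0; i < nr_events; i++)
		printf("caller=%#lx lock=%#lx wait=%lu id=%u\n",
		       events[i].caller, events[i].lock,
		       events[i].wait, events[i].id);
	return 0;
}

The sequence in main() mirrors the hook placement in the patch: arch_spin_lock() samples the cycle counter before spinning and reports the delta on acquisition, arch_spin_trylock() reports only the outcome, and arch_spin_unlock() reports the release.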