--- zzzz-none-000/linux-2.6.39.4/arch/arm/include/asm/atomic.h	2011-08-03 19:43:28.000000000 +0000
+++ puma6-arm-6490-729/linux-2.6.39.4/arch/arm/include/asm/atomic.h	2021-11-10 13:23:09.000000000 +0000
@@ -39,6 +39,7 @@
 	unsigned long tmp;
 	int result;
 
+	dmb();
 	__asm__ __volatile__("@ atomic_add\n"
 "1:	ldrex	%0, [%3]\n"
 "	add	%0, %0, %4\n"
@@ -48,6 +49,7 @@
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
+	dmb();
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -55,7 +57,7 @@
 	unsigned long tmp;
 	int result;
 
-	smp_mb();
+	dmb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -67,7 +69,7 @@
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
-	smp_mb();
+	dmb();
 
 	return result;
 }
@@ -77,6 +79,7 @@
 	unsigned long tmp;
 	int result;
 
+	dmb();
 	__asm__ __volatile__("@ atomic_sub\n"
 "1:	ldrex	%0, [%3]\n"
 "	sub	%0, %0, %4\n"
@@ -86,6 +89,7 @@
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
+	dmb();
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
@@ -93,7 +97,7 @@
 	unsigned long tmp;
 	int result;
 
-	smp_mb();
+	dmb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%3]\n"
@@ -105,7 +109,7 @@
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
-	smp_mb();
+	dmb();
 
 	return result;
 }
@@ -114,7 +118,7 @@
 {
 	unsigned long oldval, res;
 
-	smp_mb();
+	dmb();
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -127,7 +131,7 @@
 		    : "cc");
 	} while (res);
 
-	smp_mb();
+	dmb();
 
 	return oldval;
 }
@@ -136,6 +140,7 @@
 {
 	unsigned long tmp, tmp2;
 
+	dmb();
 	__asm__ __volatile__("@ atomic_clear_mask\n"
 "1:	ldrex	%0, [%3]\n"
 "	bic	%0, %0, %4\n"
@@ -147,6 +152,64 @@
 	: "cc");
 }
 
+static inline int atomic_inc_with_max_return(atomic_t *v, unsigned int max_value)
+{
+	int oldval, newval;
+	unsigned long tmp;
+	max_value -= 1;
+
+	smp_mb();
+
+	__asm__ __volatile__ ("@ atomic_inc_with_max_return\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	beq	2f\n"
+"	add	%1, %0, #1\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (max_value)
+	: "cc");
+
+	if (likely((unsigned int)oldval != max_value))
+	{
+		smp_mb();
+
+		return newval;
+	}
+
+	return -1;
+}
+
+static inline int atomic_inc_with_wrap_return(atomic_t *v, unsigned int max_value)
+{
+	int oldval, newval;
+	unsigned long tmp;
+	max_value -= 1;
+
+	smp_mb();
+
+	__asm__ __volatile__ ("@ atomic_inc_with_wrap_return\n"
+"1:	ldrex	%0, [%4]\n"
+"	teq	%0, %5\n"
+"	moveq	%1, #0\n"
+"	addne	%1, %0, #1\n"
+"	strex	%2, %1, [%4]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (max_value)
+	: "cc");
+
+	smp_mb();
+
+	return newval;
+
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -230,10 +293,10 @@
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
+#define smp_mb__before_atomic_dec()	dmb()
+#define smp_mb__after_atomic_dec()	dmb()
+#define smp_mb__before_atomic_inc()	dmb()
+#define smp_mb__after_atomic_inc()	dmb()
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
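
Note on the two helpers added by the @@ -147,6 +152,64 @@ hunk: atomic_inc_with_max_return() and atomic_inc_with_wrap_return() are not part of mainline 2.6.39, and unlike the rest of this patch they keep the mainline smp_mb() calls rather than switching to dmb(). A minimal caller sketch follows, assuming the patched asm/atomic.h is in use; MAX_INFLIGHT, RING_SLOTS, take_slot() and next_prod_index() are invented here for illustration and do not appear in the patch.

/*
 * Hypothetical callers of the vendor helpers above (illustration only).
 */
#include <asm/atomic.h>

#define MAX_INFLIGHT	16	/* counter saturates at MAX_INFLIGHT - 1 */
#define RING_SLOTS	8	/* index stays within [0, RING_SLOTS) */

static atomic_t inflight = ATOMIC_INIT(0);
static atomic_t prod_idx = ATOMIC_INIT(0);

/*
 * Claim one slot: returns the new count on success, or -1 once the
 * counter already sits at MAX_INFLIGHT - 1 (no increment happens then).
 */
static int take_slot(void)
{
	return atomic_inc_with_max_return(&inflight, MAX_INFLIGHT);
}

/*
 * Advance the index modulo RING_SLOTS and return its new value,
 * wrapping from RING_SLOTS - 1 back to 0.
 */
static int next_prod_index(void)
{
	return atomic_inc_with_wrap_return(&prod_idx, RING_SLOTS);
}

Because both helpers decrement max_value before the comparison, the stored counter never reaches the caller's max_value: the max variant stops incrementing at max_value - 1 and returns -1, while the wrap variant wraps from max_value - 1 back to 0.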