--- zzzz-none-000/linux-3.10.107/arch/mips/include/asm/atomic.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/mips/include/asm/atomic.h	2021-02-04 17:41:59.000000000 +0000
@@ -1,5 +1,5 @@
 /*
- * Atomic operations that C can't guarantee us.  Useful for
+ * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
  *
  * But use these as seldom as possible since they are much more slower
@@ -17,6 +17,7 @@
 #include <linux/irqflags.h>
 #include <linux/types.h>
 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
@@ -29,7 +30,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		READ_ONCE((v)->counter)
 
 /*
  * atomic_set - set atomic variable
@@ -38,98 +39,121 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i)	((v)->counter = (i))
+#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
 
-/*
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%0, %1		# atomic_add		\n"
-		"	addu	%0, %2					\n"
-		"	sc	%0, %1					\n"
-		"	beqzl	%0, 1b					\n"
-		"	.set	mips0					\n"
-		: "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		int temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	ll	%0, %1		# atomic_add	\n"
-			"	addu	%0, %2				\n"
-			"	sc	%0, %1				\n"
-			"	.set	mips0				\n"
-			: "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!temp));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		v->counter += i;
-		raw_local_irq_restore(flags);
-	}
-}
+#define ATOMIC_OP(op, c_op, asm_op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+		int temp; \
+ \
+		__asm__ __volatile__( \
+		"	.set	arch=r4000				\n" \
+		"1:	ll	%0, %1		# atomic_" #op "	\n" \
+		"	" #asm_op " %0, %2				\n" \
+		"	sc	%0, %1					\n" \
+		"	beqzl	%0, 1b					\n" \
+		"	.set	mips0					\n" \
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+		: "Ir" (i)); \
+	} else if (kernel_uses_llsc) { \
+		int temp; \
+ \
+		do { \
+			__asm__ __volatile__( \
+			"	.set	"MIPS_ISA_LEVEL"		\n" \
+			"	ll	%0, %1		# atomic_" #op "\n" \
+			"	" #asm_op " %0, %2			\n" \
+			"	sc	%0, %1				\n" \
+			"	.set	mips0				\n" \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+			: "Ir" (i)); \
+		} while (unlikely(!temp)); \
+	} else { \
+		unsigned long flags; \
+ \
+		raw_local_irq_save(flags); \
+		v->counter c_op i; \
+		raw_local_irq_restore(flags); \
+	} \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+	int result; \
+ \
+	smp_mb__before_llsc(); \
+ \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+		int temp; \
+ \
+		__asm__ __volatile__( \
+		"	.set	arch=r4000				\n" \
+		"1:	ll	%1, %2		# atomic_" #op "_return	\n" \
+		"	" #asm_op " %0, %1, %3				\n" \
+		"	sc	%0, %2					\n" \
+		"	beqzl	%0, 1b					\n" \
+		"	" #asm_op " %0, %1, %3				\n" \
+		"	.set	mips0					\n" \
+		: "=&r" (result), "=&r" (temp), \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
+		: "Ir" (i)); \
+	} else if (kernel_uses_llsc) { \
+		int temp; \
+ \
+		do { \
+			__asm__ __volatile__( \
+			"	.set	"MIPS_ISA_LEVEL"		\n" \
+			"	ll	%1, %2	# atomic_" #op "_return	\n" \
+			"	" #asm_op " %0, %1, %3			\n" \
+			"	sc	%0, %2				\n" \
+			"	.set	mips0				\n" \
+			: "=&r" (result), "=&r" (temp), \
+			  "+" GCC_OFF_SMALL_ASM() (v->counter) \
+			: "Ir" (i)); \
+		} while (unlikely(!result)); \
+ \
+		result = temp; result c_op i; \
+	} else { \
+		unsigned long flags; \
+ \
+		raw_local_irq_save(flags); \
+		result = v->counter; \
+		result c_op i; \
+		v->counter = result; \
+		raw_local_irq_restore(flags); \
+	} \
+ \
+	smp_llsc_mb(); \
+ \
+	return result; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+	ATOMIC_OP(op, c_op, asm_op) \
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, addu)
+ATOMIC_OPS(sub, -=, subu)
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /*
- * atomic_sub - subtract the atomic variable
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%0, %1		# atomic_sub		\n"
-		"	subu	%0, %2					\n"
-		"	sc	%0, %1					\n"
-		"	beqzl	%0, 1b					\n"
-		"	.set	mips0					\n"
-		: "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		int temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	ll	%0, %1		# atomic_sub	\n"
-			"	subu	%0, %2				\n"
-			"	sc	%0, %1				\n"
-			"	.set	mips0				\n"
-			: "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!temp));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		v->counter -= i;
-		raw_local_irq_restore(flags);
-	}
-}
-
-/*
- * Same as above, but return the result value
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
  */
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	int result;
@@ -139,37 +163,47 @@
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%1, %2		# atomic_add_return	\n"
-		"	addu	%0, %1, %3				\n"
+		"	.set	arch=r4000				\n"
+		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
+		"	subu	%0, %1, %3				\n"
+		"	bltz	%0, 1f					\n"
 		"	sc	%0, %2					\n"
+		"	.set	noreorder				\n"
 		"	beqzl	%0, 1b					\n"
-		"	addu	%0, %1, %3				\n"
+		"	subu	%0, %1, %3				\n"
+		"	.set	reorder					\n"
+		"1:							\n"
 		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
+		: "=&r" (result), "=&r" (temp),
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
+		: "memory");
 	} else if (kernel_uses_llsc) {
 		int temp;
 
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	ll	%1, %2	# atomic_add_return	\n"
-			"	addu	%0, %1, %3			\n"
-			"	sc	%0, %2				\n"
-			"	.set	mips0				\n"
-			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!result));
-
-		result = temp + i;
+		__asm__ __volatile__(
+		"	.set	"MIPS_ISA_LEVEL"			\n"
+		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
+		"	subu	%0, %1, %3				\n"
+		"	bltz	%0, 1f					\n"
+		"	sc	%0, %2					\n"
+		"	.set	noreorder				\n"
+		"	beqz	%0, 1b					\n"
+		"	subu	%0, %1, %3				\n"
+		"	.set	reorder					\n"
+		"1:							\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp),
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i));
 	} else {
 		unsigned long flags;
 
 		raw_local_irq_save(flags);
 		result = v->counter;
-		result += i;
-		v->counter = result;
+		result -= i;
+		if (result >= 0)
+			v->counter = result;
 		raw_local_irq_restore(flags);
 	}
 
@@ -178,51 +212,42 @@
 	return result;
 }
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+/*
+ * Return the incremented value if value < max_value.
+ * If the incremented value would equal max_value, do not write and return -1.
+ */
+static __inline__ int atomic_inc_with_max_return(atomic_t * v, unsigned int max_value)
 {
 	int result;
+	max_value -= 1;	/* exclusive limit */
 
 	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%1, %2		# atomic_sub_return	\n"
-		"	subu	%0, %1, %3				\n"
-		"	sc	%0, %2					\n"
-		"	beqzl	%0, 1b					\n"
-		"	subu	%0, %1, %3				\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
-
-		result = temp - i;
+		panic("atomic_inc_with_max_return not implemented\n");
 	} else if (kernel_uses_llsc) {
 		int temp;
+		int inc;
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	ll	%1, %2	# atomic_sub_return	\n"
-			"	subu	%0, %1, %3			\n"
-			"	sc	%0, %2				\n"
-			"	.set	mips0				\n"
-			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!result));
-
-		result = temp - i;
+			"	.set	push				\n"
+			"	.set	mips32r2			\n"
+			"	ll	%[temp], %[counter]	# atomic_inc_with_max_return	\n"
+			"	sltu	%[inc], %[temp], %[max_value]	# temp < max_value	\n"
+			"	addu	%[result], %[temp], %[inc]	\n"
+			"	sc	%[result], %[counter]		\n"
+			"	.set	pop				\n"
+			: [inc] "=&r" (inc), [result] "=&r" (result), [temp] "=&r" (temp), [counter] "+m" (v->counter)
+			: [max_value] "Ir" (max_value)
+			);
+		} while (unlikely(!result));
+		if (inc == 0)
+			result = -1;
+		else
+			result = temp + inc;
 	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		result = v->counter;
-		result -= i;
-		v->counter = result;
-		raw_local_irq_restore(flags);
+		panic("atomic_inc_with_max_return not implemented\n");
 	}
 
 	smp_llsc_mb();
 
@@ -231,67 +256,40 @@
 	return result;
 }
 
 /*
- * atomic_sub_if_positive - conditionally subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically test @v and subtract @i if @v is greater or equal than @i.
- * The function returns the old value of @v minus @i.
+ * Return the incremented value if value < max_value.
+ * If the incremented value would equal max_value, wrap to zero and return zero.
 */
-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+static __inline__ int atomic_inc_with_wrap_return(atomic_t * v, unsigned int max_value)
 {
 	int result;
+	max_value -= 1;	/* exclusive limit */
 
 	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
-		"	subu	%0, %1, %3				\n"
-		"	bltz	%0, 1f					\n"
-		"	sc	%0, %2					\n"
-		"	.set	noreorder				\n"
-		"	beqzl	%0, 1b					\n"
-		"	subu	%0, %1, %3				\n"
-		"	.set	reorder					\n"
-		"1:							\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
+		panic("atomic_inc_with_wrap_return not implemented\n");
 	} else if (kernel_uses_llsc) {
-		int temp;
+		int temp;
+		int inc;
 
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
-		"	subu	%0, %1, %3				\n"
-		"	bltz	%0, 1f					\n"
-		"	sc	%0, %2					\n"
-		"	.set	noreorder				\n"
-		"	beqz	%0, 1b					\n"
-		"	subu	%0, %1, %3				\n"
-		"	.set	reorder					\n"
-		"1:							\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
+		do {
+			__asm__ __volatile__(
+			"	.set	push				\n"
+			"	.set	mips32r2			\n"
+			"	ll	%[temp], %[counter]	# atomic_inc_with_wrap_return	\n"
+			"	sltu	%[inc], %[temp], %[max_value]	# temp < max_value	\n"
+			"	movz	%[temp], %[inc], %[inc]		\n"
+			"	addu	%[result], %[temp], %[inc]	\n"
+			"	sc	%[result], %[counter]		\n"
+			"	.set	pop				\n"
+			: [inc] "=&r" (inc), [result] "=&r" (result), [temp] "=&r" (temp), [counter] "+m" (v->counter)
+			: [max_value] "Ir" (max_value)
+			);
+		} while (unlikely(!result));
+		result = temp + inc;
 	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		result = v->counter;
-		result -= i;
-		if (result >= 0)
-			v->counter = result;
-		raw_local_irq_restore(flags);
+		panic("atomic_inc_with_wrap_return not implemented\n");
 	}
 
-	smp_llsc_mb();
-
 	return result;
 }
@@ -398,207 +396,122 @@
  * @v: pointer of type atomic64_t
  *
  */
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic64_read(v)	READ_ONCE((v)->counter)
 
 /*
  * atomic64_set - set atomic variable
  * @v: pointer of type atomic64_t
  * @i: required value
  */
-#define atomic64_set(v, i)	((v)->counter = (i))
+#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
 
-/*
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%0, %1		# atomic64_add		\n"
-		"	daddu	%0, %2					\n"
-		"	scd	%0, %1					\n"
-		"	beqzl	%0, 1b					\n"
-		"	.set	mips0					\n"
-		: "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		long temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	lld	%0, %1		# atomic64_add	\n"
-			"	daddu	%0, %2				\n"
-			"	scd	%0, %1				\n"
-			"	.set	mips0				\n"
-			: "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!temp));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		v->counter += i;
-		raw_local_irq_restore(flags);
-	}
-}
+#define ATOMIC64_OP(op, c_op, asm_op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+		long temp; \
+ \
+		__asm__ __volatile__( \
+		"	.set	arch=r4000				\n" \
+		"1:	lld	%0, %1		# atomic64_" #op "	\n" \
+		"	" #asm_op " %0, %2				\n" \
+		"	scd	%0, %1					\n" \
+		"	beqzl	%0, 1b					\n" \
+		"	.set	mips0					\n" \
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+		: "Ir" (i)); \
+	} else if (kernel_uses_llsc) { \
+		long temp; \
+ \
+		do { \
+			__asm__ __volatile__( \
+			"	.set	"MIPS_ISA_LEVEL"		\n" \
+			"	lld	%0, %1		# atomic64_" #op "\n" \
+			"	" #asm_op " %0, %2			\n" \
+			"	scd	%0, %1				\n" \
+			"	.set	mips0				\n" \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+			: "Ir" (i)); \
+		} while (unlikely(!temp)); \
+	} else { \
+		unsigned long flags; \
+ \
+		raw_local_irq_save(flags); \
+		v->counter c_op i; \
+		raw_local_irq_restore(flags); \
+	} \
+}
+
+#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+	long result; \
+ \
+	smp_mb__before_llsc(); \
+ \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+		long temp; \
+ \
+		__asm__ __volatile__( \
+		"	.set	arch=r4000				\n" \
+		"1:	lld	%1, %2		# atomic64_" #op "_return\n" \
+		"	" #asm_op " %0, %1, %3				\n" \
+		"	scd	%0, %2					\n" \
+		"	beqzl	%0, 1b					\n" \
+		"	" #asm_op " %0, %1, %3				\n" \
+		"	.set	mips0					\n" \
+		: "=&r" (result), "=&r" (temp), \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter) \
+		: "Ir" (i)); \
+	} else if (kernel_uses_llsc) { \
+		long temp; \
+ \
+		do { \
+			__asm__ __volatile__( \
+			"	.set	"MIPS_ISA_LEVEL"		\n" \
+			"	lld	%1, %2	# atomic64_" #op "_return\n" \
+			"	" #asm_op " %0, %1, %3			\n" \
+			"	scd	%0, %2				\n" \
+			"	.set	mips0				\n" \
+			: "=&r" (result), "=&r" (temp), \
+			  "=" GCC_OFF_SMALL_ASM() (v->counter) \
+			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
+			: "memory"); \
+		} while (unlikely(!result)); \
+ \
+		result = temp; result c_op i; \
+	} else { \
+		unsigned long flags; \
+ \
+		raw_local_irq_save(flags); \
+		result = v->counter; \
+		result c_op i; \
+		v->counter = result; \
+		raw_local_irq_restore(flags); \
+	} \
+ \
+	smp_llsc_mb(); \
+ \
+	return result; \
+}
+
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+	ATOMIC64_OP(op, c_op, asm_op) \
+	ATOMIC64_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC64_OPS(add, +=, daddu)
+ATOMIC64_OPS(sub, -=, dsubu)
+ATOMIC64_OP(and, &=, and)
+ATOMIC64_OP(or, |=, or)
+ATOMIC64_OP(xor, ^=, xor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 /*
- * atomic64_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%0, %1		# atomic64_sub		\n"
-		"	dsubu	%0, %2					\n"
-		"	scd	%0, %1					\n"
-		"	beqzl	%0, 1b					\n"
-		"	.set	mips0					\n"
-		: "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		long temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	lld	%0, %1		# atomic64_sub	\n"
-			"	dsubu	%0, %2				\n"
-			"	scd	%0, %1				\n"
-			"	.set	mips0				\n"
-			: "=&r" (temp), "+m" (v->counter)
-			: "Ir" (i));
-		} while (unlikely(!temp));
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		v->counter -= i;
-		raw_local_irq_restore(flags);
-	}
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
-	long result;
-
-	smp_mb__before_llsc();
-
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%1, %2		# atomic64_add_return	\n"
-		"	daddu	%0, %1, %3				\n"
-		"	scd	%0, %2					\n"
-		"	beqzl	%0, 1b					\n"
-		"	daddu	%0, %1, %3				\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
-		: "Ir" (i));
-	} else if (kernel_uses_llsc) {
-		long temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	lld	%1, %2	# atomic64_add_return	\n"
-			"	daddu	%0, %1, %3			\n"
-			"	scd	%0, %2				\n"
-			"	.set	mips0				\n"
-			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter)
-			: "memory");
-		} while (unlikely(!result));
-
-		result = temp + i;
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		result = v->counter;
-		result += i;
-		v->counter = result;
-		raw_local_irq_restore(flags);
-	}
-
-	smp_llsc_mb();
-
-	return result;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
-	long result;
-
-	smp_mb__before_llsc();
-
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	mips3					\n"
-		"1:	lld	%1, %2		# atomic64_sub_return	\n"
-		"	dsubu	%0, %1, %3				\n"
-		"	scd	%0, %2					\n"
-		"	beqzl	%0, 1b					\n"
-		"	dsubu	%0, %1, %3				\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
-		: "memory");
-	} else if (kernel_uses_llsc) {
-		long temp;
-
-		do {
-			__asm__ __volatile__(
-			"	.set	mips3				\n"
-			"	lld	%1, %2	# atomic64_sub_return	\n"
-			"	dsubu	%0, %1, %3			\n"
-			"	scd	%0, %2				\n"
-			"	.set	mips0				\n"
-			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-			: "Ir" (i), "m" (v->counter)
-			: "memory");
-		} while (unlikely(!result));
-
-		result = temp - i;
-	} else {
-		unsigned long flags;
-
-		raw_local_irq_save(flags);
-		result = v->counter;
-		result -= i;
-		v->counter = result;
-		raw_local_irq_restore(flags);
-	}
-
-	smp_llsc_mb();
-
-	return result;
-}
-
-/*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic
+ * variable
  * @i: integer value to subtract
  * @v: pointer of type atomic64_t
  *
@@ -615,7 +528,7 @@
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -626,14 +539,15 @@
 		"	.set	reorder					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
-		: "Ir" (i), "m" (v->counter)
+		: "=&r" (result), "=&r" (temp),
+		  "=" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -644,7 +558,8 @@
 		"	.set	reorder					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "=&r" (result), "=&r" (temp),
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "Ir" (i));
 	} else {
 		unsigned long flags;
@@ -673,7 +588,7 @@
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns true iff @v was not @u.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
@@ -761,13 +676,4 @@
 
 #endif /* CONFIG_64BIT */
 
-/*
- * atomic*_return operations are serializing but not the non-*_return
- * versions.
- */
-#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
-#define smp_mb__after_atomic_dec()	smp_llsc_mb()
-#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
-#define smp_mb__after_atomic_inc()	smp_llsc_mb()
-
 #endif /* _ASM_ATOMIC_H */
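
Every ll/sc loop in this patch has the same shape: load-linked, modify in a
register, store-conditional, branch back if the store-conditional failed. A
minimal portable analogue of atomic_add_return(), sketched here with the
GCC/Clang __atomic builtins purely as an illustration of that retry pattern
(the kernel code above does not use these builtins; function names below are
made up for the example):

#include <stdio.h>

static int analogue_add_return(int i, int *counter)
{
	int old, new_val;

	do {
		/* roughly what ll does: snapshot the current value */
		old = __atomic_load_n(counter, __ATOMIC_RELAXED);
		new_val = old + i;
		/* roughly what sc does: publish only if nobody raced us */
	} while (!__atomic_compare_exchange_n(counter, &old, new_val,
					      0, __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
	return new_val;
}

int main(void)
{
	int c = 40;

	printf("%d\n", analogue_add_return(2, &c));	/* prints 42 */
	return 0;
}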
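
The ATOMIC_OP()/ATOMIC_OP_RETURN()/ATOMIC_OPS() macros above stamp out
atomic_add(), atomic_sub(), atomic_and(), atomic_or() and atomic_xor() from a
single template via token pasting. A stand-alone sketch of the same generation
pattern, reduced to the interrupt-off fallback path only (the my_* type and
function names are invented for the example):

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* one template generates one function per (name, C operator) pair */
#define MY_ATOMIC_OP(op, c_op)						\
static inline void my_atomic_##op(int i, my_atomic_t *v)		\
{									\
	/* the real fallback wraps this in raw_local_irq_save/restore */\
	v->counter c_op i;						\
}

MY_ATOMIC_OP(add, +=)
MY_ATOMIC_OP(and, &=)

int main(void)
{
	my_atomic_t v = { .counter = 6 };

	my_atomic_add(3, &v);		/* counter = 9 */
	my_atomic_and(5, &v);		/* counter = 9 & 5 = 1 */
	printf("%d\n", v.counter);	/* prints 1 */
	return 0;
}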
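
The two vendor additions, atomic_inc_with_max_return() and
atomic_inc_with_wrap_return(), implement a bounded counter and a wrapping
counter with an exclusive limit (max_value is decremented once on entry). A
plain, deliberately non-atomic C sketch of their return-value semantics --
illustration only, the real versions are the ll/sc loops in the patch, and
max_value is assumed to be at least 1:

#include <stdio.h>

/* bounded: increment and return the new value while it stays below the
 * exclusive limit; at the limit, leave the counter alone and return -1 */
static int inc_with_max(unsigned int *counter, unsigned int max_value)
{
	if (*counter < max_value - 1)	/* mirrors the sltu test */
		return ++(*counter);
	return -1;
}

/* wrapping: same test, but wrap to zero (and return zero) at the limit */
static int inc_with_wrap(unsigned int *counter, unsigned int max_value)
{
	if (*counter < max_value - 1)
		return ++(*counter);
	*counter = 0;
	return 0;
}

int main(void)
{
	unsigned int c = 0;
	int i;

	for (i = 0; i < 5; i++)
		printf("%d ", inc_with_max(&c, 4));	/* 1 2 3 -1 -1 */
	printf("\n");
	c = 0;
	for (i = 0; i < 5; i++)
		printf("%d ", inc_with_wrap(&c, 4));	/* 1 2 3 0 1 */
	printf("\n");
	return 0;
}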