--- zzzz-none-000/linux-2.4.17/include/asm-mips64/bitops.h	2001-09-09 17:43:02.000000000 +0000
+++ sangam-fb-322/linux-2.4.17/include/asm-mips64/bitops.h	2004-11-24 13:21:49.000000000 +0000
@@ -30,8 +30,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-extern __inline__ void
-set_bit(unsigned long nr, volatile void *addr)
+static inline void set_bit(unsigned long nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
 	unsigned long temp;
@@ -55,7 +54,7 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-extern __inline__ void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(int nr, volatile void * addr)
 {
 	unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
 
@@ -72,8 +71,7 @@
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-extern __inline__ void
-clear_bit(unsigned long nr, volatile void *addr)
+static inline void clear_bit(unsigned long nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
 	unsigned long temp;
@@ -99,8 +97,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-extern __inline__ void
-change_bit(unsigned long nr, volatile void *addr)
+static inline void change_bit(unsigned long nr, volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
 	unsigned long temp;
@@ -123,7 +120,7 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-extern __inline__ void __change_bit(int nr, volatile void * addr)
+static inline void __change_bit(int nr, volatile void * addr)
 {
 	unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
 
@@ -138,8 +135,8 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ unsigned long
-test_and_set_bit(unsigned long nr, volatile void *addr)
+static inline unsigned long test_and_set_bit(unsigned long nr,
+	volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
 	unsigned long temp, res;
@@ -168,8 +165,7 @@
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern __inline__ int
-__test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(int nr, volatile void *addr)
 {
 	unsigned long mask, retval;
 	long *a = (unsigned long *) addr;
@@ -190,8 +186,8 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ unsigned long
-test_and_clear_bit(unsigned long nr, volatile void *addr)
+static inline unsigned long test_and_clear_bit(unsigned long nr,
+	volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
 	unsigned long temp, res;
@@ -221,8 +217,7 @@
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(int nr, volatile void * addr)
 {
 	unsigned long mask, retval;
 	unsigned long *a = (unsigned long *) addr;
@@ -243,8 +238,8 @@
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern __inline__ unsigned long
-test_and_change_bit(unsigned long nr, volatile void *addr)
+static inline unsigned long test_and_change_bit(unsigned long nr,
+	volatile void *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
 	unsigned long temp, res;
@@ -273,8 +268,7 @@
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
-extern __inline__ int
-__test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(int nr, volatile void *addr)
 {
 	unsigned long mask, retval;
 	unsigned long *a = (unsigned long *) addr;
@@ -291,8 +285,7 @@
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-extern __inline__ unsigned long
-test_bit(int nr, volatile void * addr)
+static inline unsigned long test_bit(int nr, volatile void * addr)
 {
 	return 1UL & (((volatile unsigned long *) addr)[nr >> 6] >> (nr & 0x3f));
 }
@@ -309,8 +302,7 @@
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
-extern __inline__ int
-find_first_zero_bit (void *addr, unsigned size)
+static inline int find_first_zero_bit (void *addr, unsigned size)
 {
 	unsigned long dummy;
 	int res;
@@ -346,8 +338,7 @@
 		"2:"
 		: "=r" (res), "=r" (dummy), "=r" (addr)
 		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
-		  "2" (addr), "r" (size)
-		: "$1");
+		  "2" (addr), "r" (size));
 
 	return res;
 }
@@ -358,8 +349,7 @@
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
-extern __inline__ int
-find_next_zero_bit (void * addr, int size, int offset)
+static inline int find_next_zero_bit (void * addr, int size, int offset)
 {
 	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
 	int set = 0, bit = offset & 31, res;
@@ -380,8 +370,7 @@
 			".set\treorder\n"
 			"1:"
 			: "=r" (set), "=r" (dummy)
-			: "0" (0), "1" (1 << bit), "r" (*p)
-			: "$1");
+			: "0" (0), "1" (1 << bit), "r" (*p));
 		if (set < (32 - bit))
 			return set + offset;
 		set = 32 - bit;
@@ -402,7 +391,7 @@
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
-extern __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
 {
 	unsigned long k;
 
@@ -453,8 +442,8 @@
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
-extern __inline__ unsigned long
-find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+static inline unsigned long find_next_zero_bit(void *addr, unsigned long size,
+						unsigned long offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
 	unsigned long result = offset & ~63UL;
@@ -501,8 +490,7 @@
 
 #ifdef __MIPSEB__
 
-extern inline int
-ext2_set_bit(int nr,void * addr)
+static inline int ext2_set_bit(int nr,void * addr)
 {
 	int mask, retval, flags;
 	unsigned char *ADDR = (unsigned char *) addr;
@@ -516,8 +504,7 @@
 	return retval;
 }
 
-extern inline int
-ext2_clear_bit(int nr, void * addr)
+static inline int ext2_clear_bit(int nr, void * addr)
 {
 	int mask, retval, flags;
 	unsigned char *ADDR = (unsigned char *) addr;
@@ -531,8 +518,7 @@
 	return retval;
 }
 
-extern inline int
-ext2_test_bit(int nr, const void * addr)
+static inline int ext2_test_bit(int nr, const void * addr)
 {
 	int mask;
 	const unsigned char *ADDR = (const unsigned char *) addr;
@@ -545,8 +531,9 @@
 #define ext2_find_first_zero_bit(addr, size) \
 	ext2_find_next_zero_bit((addr), (size), 0)
 
-extern inline unsigned int
-ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+static inline unsigned int ext2_find_next_zero_bit(void *addr,
+						   unsigned long size,
+						   unsigned long offset)
 {
 	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
 	unsigned int result = offset & ~31UL;
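
The conversion from `extern __inline__` to `static inline` throughout this patch is more than a style cleanup: under GCC's traditional GNU89 inline semantics, `extern inline` supplies a body for inlining but emits no out-of-line definition, so any call the compiler declines to inline (typical at -O0 or with -fno-inline) becomes an undefined reference at link time. `static inline` instead gives each translation unit its own private fallback copy, which is the safe idiom for helpers defined in a widely included header. A minimal sketch of the idiom follows; the header name flagops.h and the helper set_flag are hypothetical, chosen only to illustrate the pattern, and are not part of this file:

/* flagops.h - hypothetical header illustrating the static-inline idiom.
 *
 * With GNU89 `extern __inline__`, a call that is not inlined links
 * against a definition nobody emits; `static inline` lets every
 * includer fall back to a private out-of-line copy instead.
 */
#ifndef FLAGOPS_H
#define FLAGOPS_H

static inline void set_flag(unsigned long nr, volatile unsigned long *words)
{
	/* Non-atomic sketch only: the real set_bit() above uses a MIPS64
	 * load-linked/store-conditional loop.  Word index is nr >> 6,
	 * bit index within the 64-bit word is nr & 63, matching the
	 * addressing used by the bitops in this header. */
	words[nr >> 6] |= 1UL << (nr & 63);
}

#endif /* FLAGOPS_H */

The other recurring change, dropping the `"$1"` clobber from the inline asm in find_first_zero_bit() and find_next_zero_bit(), is consistent with asm templates that never touch $1 (the MIPS assembler temporary, $at): declaring an unused clobber only constrains the compiler, and later toolchains reject clobber lists naming the reserved assembler temporary, so removing it is presumably what keeps this header building with newer GCC.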