--- zzzz-none-000/linux-2.6.19.2/arch/i386/lib/usercopy.c	2007-01-10 19:10:37.000000000 +0000
+++ davinci-8020-5504/linux-2.6.19.2/arch/i386/lib/usercopy.c	2007-01-19 14:42:56.000000000 +0000
@@ -28,34 +28,41 @@
  * Copy a null terminated string from userspace.
  */
-#define __do_strncpy_from_user(dst,src,count,res) \
-do { \
-	int __d0, __d1, __d2; \
-	might_sleep(); \
-	__asm__ __volatile__( \
-		" testl %1,%1\n" \
-		" jz 2f\n" \
-		"0: lodsb\n" \
-		" stosb\n" \
-		" testb %%al,%%al\n" \
-		" jz 1f\n" \
-		" decl %1\n" \
-		" jnz 0b\n" \
-		"1: subl %1,%0\n" \
-		"2:\n" \
-		".section .fixup,\"ax\"\n" \
-		"3: movl %5,%0\n" \
-		" jmp 2b\n" \
-		".previous\n" \
-		".section __ex_table,\"a\"\n" \
-		" .align 4\n" \
-		" .long 0b,3b\n" \
-		".previous" \
-		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \
-		  "=&D" (__d2) \
-		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
-		: "memory"); \
-} while (0)
+static long __do_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	int __d0, __d1, __d2;
+	long res = -EFAULT;
+
+	might_sleep();
+	__asm__ __volatile__(
+		" movw %w10,%%ds\n"
+		" testl %1,%1\n"
+		" jz 2f\n"
+		"0: lodsb\n"
+		" stosb\n"
+		" testb %%al,%%al\n"
+		" jz 1f\n"
+		" decl %1\n"
+		" jnz 0b\n"
+		"1: subl %1,%0\n"
+		"2:\n"
+		" pushl %%ss\n"
+		" popl %%ds\n"
+		".section .fixup,\"ax\"\n"
+		"3: movl %5,%0\n"
+		" jmp 2b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 0b,3b\n"
+		".previous"
+		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),
+		  "=&D" (__d2)
+		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst),
+		  "r"(__USER_DS)
+		: "memory");
+	return res;
+}
 
 /**
  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
@@ -80,9 +87,7 @@
 long
 __strncpy_from_user(char *dst, const char __user *src, long count)
 {
-	long res;
-	__do_strncpy_from_user(dst, src, count, res);
-	return res;
+	return __do_strncpy_from_user(dst, src, count);
 }
 EXPORT_SYMBOL(__strncpy_from_user);
@@ -109,7 +114,7 @@
 {
 	long res = -EFAULT;
 	if (access_ok(VERIFY_READ, src, 1))
-		__do_strncpy_from_user(dst, src, count, res);
+		res = __do_strncpy_from_user(dst, src, count);
 	return res;
 }
 EXPORT_SYMBOL(strncpy_from_user);
@@ -118,27 +123,33 @@
  * Zero Userspace
  */
 
-#define __do_clear_user(addr,size) \
-do { \
-	int __d0; \
-	might_sleep(); \
-	__asm__ __volatile__( \
-		"0: rep; stosl\n" \
-		" movl %2,%0\n" \
-		"1: rep; stosb\n" \
-		"2:\n" \
-		".section .fixup,\"ax\"\n" \
-		"3: lea 0(%2,%0,4),%0\n" \
-		" jmp 2b\n" \
-		".previous\n" \
-		".section __ex_table,\"a\"\n" \
-		" .align 4\n" \
-		" .long 0b,3b\n" \
-		" .long 1b,2b\n" \
-		".previous" \
-		: "=&c"(size), "=&D" (__d0) \
-		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
-} while (0)
+static unsigned long __do_clear_user(void __user *addr, unsigned long size)
+{
+	int __d0;
+
+	might_sleep();
+	__asm__ __volatile__(
+		" movw %w6,%%es\n"
+		"0: rep; stosl\n"
+		" movl %2,%0\n"
+		"1: rep; stosb\n"
+		"2:\n"
+		" pushl %%ss\n"
+		" popl %%es\n"
+		".section .fixup,\"ax\"\n"
+		"3: lea 0(%2,%0,4),%0\n"
+		" jmp 2b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 0b,3b\n"
+		" .long 1b,2b\n"
+		".previous"
+		: "=&c"(size), "=&D" (__d0)
+		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0),
+		  "r"(__USER_DS));
+	return size;
+}
 
 /**
  * clear_user: - Zero a block of memory in user space.
@@ -155,7 +166,7 @@
 {
 	might_sleep();
 	if (access_ok(VERIFY_WRITE, to, n))
-		__do_clear_user(to, n);
+		n = __do_clear_user(to, n);
 	return n;
 }
 EXPORT_SYMBOL(clear_user);
@@ -174,8 +185,7 @@
 unsigned long
 __clear_user(void __user *to, unsigned long n)
 {
-	__do_clear_user(to, n);
-	return n;
+	return __do_clear_user(to, n);
 }
 EXPORT_SYMBOL(__clear_user);
@@ -198,14 +208,17 @@
 {
 	might_sleep();
 	__asm__ __volatile__(
+		" movw %w8,%%es\n"
 		" testl %0, %0\n"
 		" jz 3f\n"
-		" andl %0,%%ecx\n"
+		" movl %0,%%ecx\n"
 		"0: repne; scasb\n"
 		" setne %%al\n"
 		" subl %%ecx,%0\n"
 		" addl %0,%%eax\n"
 		"1:\n"
+		" pushl %%ss\n"
+		" popl %%es\n"
 		".section .fixup,\"ax\"\n"
 		"2: xorl %%eax,%%eax\n"
 		" jmp 1b\n"
@@ -217,7 +230,7 @@
 		" .long 0b,2b\n"
 		".previous"
 		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
-		:"0" (n), "1" (s), "2" (0), "3" (mask)
+		:"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS)
 		:"cc");
 	return res & mask;
 }
@@ -225,10 +238,121 @@
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size)
+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
+{
+	int d0, d1;
+	__asm__ __volatile__(
+		" movw %w6, %%es\n"
+		" .align 2,0x90\n"
+		"1: movl 32(%4), %%eax\n"
+		" cmpl $67, %0\n"
+		" jbe 3f\n"
+		"2: movl 64(%4), %%eax\n"
+		" .align 2,0x90\n"
+		"3: movl 0(%4), %%eax\n"
+		"4: movl 4(%4), %%edx\n"
+		"5: movl %%eax, %%es:0(%3)\n"
+		"6: movl %%edx, %%es:4(%3)\n"
+		"7: movl 8(%4), %%eax\n"
+		"8: movl 12(%4),%%edx\n"
+		"9: movl %%eax, %%es:8(%3)\n"
+		"10: movl %%edx, %%es:12(%3)\n"
+		"11: movl 16(%4), %%eax\n"
+		"12: movl 20(%4), %%edx\n"
+		"13: movl %%eax, %%es:16(%3)\n"
+		"14: movl %%edx, %%es:20(%3)\n"
+		"15: movl 24(%4), %%eax\n"
+		"16: movl 28(%4), %%edx\n"
+		"17: movl %%eax, %%es:24(%3)\n"
+		"18: movl %%edx, %%es:28(%3)\n"
+		"19: movl 32(%4), %%eax\n"
+		"20: movl 36(%4), %%edx\n"
+		"21: movl %%eax, %%es:32(%3)\n"
+		"22: movl %%edx, %%es:36(%3)\n"
+		"23: movl 40(%4), %%eax\n"
+		"24: movl 44(%4), %%edx\n"
+		"25: movl %%eax, %%es:40(%3)\n"
+		"26: movl %%edx, %%es:44(%3)\n"
+		"27: movl 48(%4), %%eax\n"
+		"28: movl 52(%4), %%edx\n"
+		"29: movl %%eax, %%es:48(%3)\n"
+		"30: movl %%edx, %%es:52(%3)\n"
+		"31: movl 56(%4), %%eax\n"
+		"32: movl 60(%4), %%edx\n"
+		"33: movl %%eax, %%es:56(%3)\n"
+		"34: movl %%edx, %%es:60(%3)\n"
+		" addl $-64, %0\n"
+		" addl $64, %4\n"
+		" addl $64, %3\n"
+		" cmpl $63, %0\n"
+		" ja 1b\n"
+		"35: movl %0, %%eax\n"
+		" shrl $2, %0\n"
+		" andl $3, %%eax\n"
+		" cld\n"
+		"99: rep; movsl\n"
+		"36: movl %%eax, %0\n"
+		"37: rep; movsb\n"
+		"100:\n"
+		" pushl %%ss\n"
+		" popl %%es\n"
+		".section .fixup,\"ax\"\n"
+		"101: lea 0(%%eax,%0,4),%0\n"
+		" jmp 100b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 1b,100b\n"
+		" .long 2b,100b\n"
+		" .long 3b,100b\n"
+		" .long 4b,100b\n"
+		" .long 5b,100b\n"
+		" .long 6b,100b\n"
+		" .long 7b,100b\n"
+		" .long 8b,100b\n"
+		" .long 9b,100b\n"
+		" .long 10b,100b\n"
+		" .long 11b,100b\n"
+		" .long 12b,100b\n"
+		" .long 13b,100b\n"
+		" .long 14b,100b\n"
+		" .long 15b,100b\n"
+		" .long 16b,100b\n"
+		" .long 17b,100b\n"
+		" .long 18b,100b\n"
+		" .long 19b,100b\n"
+		" .long 20b,100b\n"
+		" .long 21b,100b\n"
+		" .long 22b,100b\n"
+		" .long 23b,100b\n"
+		" .long 24b,100b\n"
+		" .long 25b,100b\n"
+		" .long 26b,100b\n"
+		" .long 27b,100b\n"
+		" .long 28b,100b\n"
+		" .long 29b,100b\n"
+		" .long 30b,100b\n"
+		" .long 31b,100b\n"
+		" .long 32b,100b\n"
+		" .long 33b,100b\n"
+		" .long 34b,100b\n"
+		" .long 35b,100b\n"
+		" .long 36b,100b\n"
+		" .long 37b,100b\n"
+		" .long 99b,101b\n"
+		".previous"
+		: "=&c"(size), "=&D" (d0), "=&S" (d1)
+		: "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
+		: "eax", "edx", "memory");
+	return size;
+}
+
+static unsigned long
+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
 {
 	int d0, d1;
 	__asm__ __volatile__(
+		" movw %w6, %%ds\n"
 		" .align 2,0x90\n"
 		"1: movl 32(%4), %%eax\n"
 		" cmpl $67, %0\n"
@@ -237,36 +361,36 @@
 		" .align 2,0x90\n"
 		"3: movl 0(%4), %%eax\n"
 		"4: movl 4(%4), %%edx\n"
-		"5: movl %%eax, 0(%3)\n"
-		"6: movl %%edx, 4(%3)\n"
+		"5: movl %%eax, %%es:0(%3)\n"
+		"6: movl %%edx, %%es:4(%3)\n"
 		"7: movl 8(%4), %%eax\n"
 		"8: movl 12(%4),%%edx\n"
-		"9: movl %%eax, 8(%3)\n"
-		"10: movl %%edx, 12(%3)\n"
+		"9: movl %%eax, %%es:8(%3)\n"
+		"10: movl %%edx, %%es:12(%3)\n"
 		"11: movl 16(%4), %%eax\n"
 		"12: movl 20(%4), %%edx\n"
-		"13: movl %%eax, 16(%3)\n"
-		"14: movl %%edx, 20(%3)\n"
+		"13: movl %%eax, %%es:16(%3)\n"
+		"14: movl %%edx, %%es:20(%3)\n"
 		"15: movl 24(%4), %%eax\n"
 		"16: movl 28(%4), %%edx\n"
-		"17: movl %%eax, 24(%3)\n"
-		"18: movl %%edx, 28(%3)\n"
+		"17: movl %%eax, %%es:24(%3)\n"
+		"18: movl %%edx, %%es:28(%3)\n"
 		"19: movl 32(%4), %%eax\n"
 		"20: movl 36(%4), %%edx\n"
-		"21: movl %%eax, 32(%3)\n"
-		"22: movl %%edx, 36(%3)\n"
+		"21: movl %%eax, %%es:32(%3)\n"
+		"22: movl %%edx, %%es:36(%3)\n"
 		"23: movl 40(%4), %%eax\n"
 		"24: movl 44(%4), %%edx\n"
-		"25: movl %%eax, 40(%3)\n"
-		"26: movl %%edx, 44(%3)\n"
+		"25: movl %%eax, %%es:40(%3)\n"
+		"26: movl %%edx, %%es:44(%3)\n"
 		"27: movl 48(%4), %%eax\n"
 		"28: movl 52(%4), %%edx\n"
-		"29: movl %%eax, 48(%3)\n"
-		"30: movl %%edx, 52(%3)\n"
+		"29: movl %%eax, %%es:48(%3)\n"
+		"30: movl %%edx, %%es:52(%3)\n"
 		"31: movl 56(%4), %%eax\n"
 		"32: movl 60(%4), %%edx\n"
-		"33: movl %%eax, 56(%3)\n"
-		"34: movl %%edx, 60(%3)\n"
+		"33: movl %%eax, %%es:56(%3)\n"
+		"34: movl %%edx, %%es:60(%3)\n"
 		" addl $-64, %0\n"
 		" addl $64, %4\n"
 		" addl $64, %3\n"
@@ -280,6 +404,8 @@
 		"36: movl %%eax, %0\n"
 		"37: rep; movsb\n"
 		"100:\n"
+		" pushl %%ss\n"
+		" popl %%ds\n"
 		".section .fixup,\"ax\"\n"
 		"101: lea 0(%%eax,%0,4),%0\n"
 		" jmp 100b\n"
@@ -326,7 +452,7 @@
 		" .long 99b,101b\n"
 		".previous"
 		: "=&c"(size), "=&D" (d0), "=&S" (d1)
-		: "1"(to), "2"(from), "0"(size)
+		: "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
 		: "eax", "edx", "memory");
 	return size;
 }
@@ -336,6 +462,7 @@
 {
 	int d0, d1;
 	__asm__ __volatile__(
+		" movw %w6, %%ds\n"
 		" .align 2,0x90\n"
 		"0: movl 32(%4), %%eax\n"
 		" cmpl $67, %0\n"
@@ -344,36 +471,36 @@
 		" .align 2,0x90\n"
 		"2: movl 0(%4), %%eax\n"
 		"21: movl 4(%4), %%edx\n"
-		" movl %%eax, 0(%3)\n"
-		" movl %%edx, 4(%3)\n"
+		" movl %%eax, %%es:0(%3)\n"
+		" movl %%edx, %%es:4(%3)\n"
 		"3: movl 8(%4), %%eax\n"
 		"31: movl 12(%4),%%edx\n"
-		" movl %%eax, 8(%3)\n"
-		" movl %%edx, 12(%3)\n"
+		" movl %%eax, %%es:8(%3)\n"
+		" movl %%edx, %%es:12(%3)\n"
 		"4: movl 16(%4), %%eax\n"
 		"41: movl 20(%4), %%edx\n"
-		" movl %%eax, 16(%3)\n"
-		" movl %%edx, 20(%3)\n"
+		" movl %%eax, %%es:16(%3)\n"
+		" movl %%edx, %%es:20(%3)\n"
 		"10: movl 24(%4), %%eax\n"
 		"51: movl 28(%4), %%edx\n"
-		" movl %%eax, 24(%3)\n"
-		" movl %%edx, 28(%3)\n"
+		" movl %%eax, %%es:24(%3)\n"
+		" movl %%edx, %%es:28(%3)\n"
 		"11: movl 32(%4), %%eax\n"
 		"61: movl 36(%4), %%edx\n"
-		" movl %%eax, 32(%3)\n"
-		" movl %%edx, 36(%3)\n"
+		" movl %%eax, %%es:32(%3)\n"
+		" movl %%edx, %%es:36(%3)\n"
 		"12: movl 40(%4), %%eax\n"
 		"71: movl 44(%4), %%edx\n"
-		" movl %%eax, 40(%3)\n"
-		" movl %%edx, 44(%3)\n"
+		" movl %%eax, %%es:40(%3)\n"
+		" movl %%edx, %%es:44(%3)\n"
 		"13: movl 48(%4), %%eax\n"
 		"81: movl 52(%4), %%edx\n"
-		" movl %%eax, 48(%3)\n"
-		" movl %%edx, 52(%3)\n"
+		" movl %%eax, %%es:48(%3)\n"
+		" movl %%edx, %%es:52(%3)\n"
 		"14: movl 56(%4), %%eax\n"
 		"91: movl 60(%4), %%edx\n"
-		" movl %%eax, 56(%3)\n"
-		" movl %%edx, 60(%3)\n"
+		" movl %%eax, %%es:56(%3)\n"
+		" movl %%edx, %%es:60(%3)\n"
 		" addl $-64, %0\n"
 		" addl $64, %4\n"
 		" addl $64, %3\n"
@@ -387,6 +514,8 @@
 		" movl %%eax,%0\n"
 		"7: rep; movsb\n"
 		"8:\n"
+		" pushl %%ss\n"
+		" popl %%ds\n"
 		".section .fixup,\"ax\"\n"
 		"9: lea 0(%%eax,%0,4),%0\n"
 		"16: pushl %0\n"
@@ -421,7 +550,7 @@
 		" .long 7b,16b\n"
 		".previous"
 		: "=&c"(size), "=&D" (d0), "=&S" (d1)
-		: "1"(to), "2"(from), "0"(size)
+		: "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
 		: "eax", "edx", "memory");
 	return size;
 }
@@ -437,6 +566,7 @@
 	int d0, d1;
 
 	__asm__ __volatile__(
+		" movw %w6, %%ds\n"
 		" .align 2,0x90\n"
 		"0: movl 32(%4), %%eax\n"
 		" cmpl $67, %0\n"
@@ -445,36 +575,36 @@
 		" .align 2,0x90\n"
 		"2: movl 0(%4), %%eax\n"
 		"21: movl 4(%4), %%edx\n"
-		" movnti %%eax, 0(%3)\n"
-		" movnti %%edx, 4(%3)\n"
+		" movnti %%eax, %%es:0(%3)\n"
+		" movnti %%edx, %%es:4(%3)\n"
 		"3: movl 8(%4), %%eax\n"
 		"31: movl 12(%4),%%edx\n"
-		" movnti %%eax, 8(%3)\n"
-		" movnti %%edx, 12(%3)\n"
+		" movnti %%eax, %%es:8(%3)\n"
+		" movnti %%edx, %%es:12(%3)\n"
 		"4: movl 16(%4), %%eax\n"
 		"41: movl 20(%4), %%edx\n"
-		" movnti %%eax, 16(%3)\n"
-		" movnti %%edx, 20(%3)\n"
+		" movnti %%eax, %%es:16(%3)\n"
+		" movnti %%edx, %%es:20(%3)\n"
 		"10: movl 24(%4), %%eax\n"
 		"51: movl 28(%4), %%edx\n"
-		" movnti %%eax, 24(%3)\n"
-		" movnti %%edx, 28(%3)\n"
+		" movnti %%eax, %%es:24(%3)\n"
+		" movnti %%edx, %%es:28(%3)\n"
 		"11: movl 32(%4), %%eax\n"
 		"61: movl 36(%4), %%edx\n"
-		" movnti %%eax, 32(%3)\n"
-		" movnti %%edx, 36(%3)\n"
+		" movnti %%eax, %%es:32(%3)\n"
+		" movnti %%edx, %%es:36(%3)\n"
 		"12: movl 40(%4), %%eax\n"
 		"71: movl 44(%4), %%edx\n"
-		" movnti %%eax, 40(%3)\n"
-		" movnti %%edx, 44(%3)\n"
+		" movnti %%eax, %%es:40(%3)\n"
+		" movnti %%edx, %%es:44(%3)\n"
 		"13: movl 48(%4), %%eax\n"
 		"81: movl 52(%4), %%edx\n"
-		" movnti %%eax, 48(%3)\n"
-		" movnti %%edx, 52(%3)\n"
+		" movnti %%eax, %%es:48(%3)\n"
+		" movnti %%edx, %%es:52(%3)\n"
 		"14: movl 56(%4), %%eax\n"
 		"91: movl 60(%4), %%edx\n"
-		" movnti %%eax, 56(%3)\n"
-		" movnti %%edx, 60(%3)\n"
+		" movnti %%eax, %%es:56(%3)\n"
+		" movnti %%edx, %%es:60(%3)\n"
 		" addl $-64, %0\n"
 		" addl $64, %4\n"
 		" addl $64, %3\n"
@@ -489,6 +619,8 @@
 		" movl %%eax,%0\n"
 		"7: rep; movsb\n"
 		"8:\n"
+		" pushl %%ss\n"
+		" popl %%ds\n"
 		".section .fixup,\"ax\"\n"
 		"9: lea 0(%%eax,%0,4),%0\n"
 		"16: pushl %0\n"
@@ -523,7 +655,7 @@
 		" .long 7b,16b\n"
 		".previous"
 		: "=&c"(size), "=&D" (d0), "=&S" (d1)
-		: "1"(to), "2"(from), "0"(size)
+		: "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
 		: "eax", "edx", "memory");
 	return size;
 }
@@ -534,6 +666,7 @@
 	int d0, d1;
 
 	__asm__ __volatile__(
+		" movw %w6, %%ds\n"
 		" .align 2,0x90\n"
 		"0: movl 32(%4), %%eax\n"
 		" cmpl $67, %0\n"
@@ -542,36 +675,36 @@
 		" .align 2,0x90\n"
 		"2: movl 0(%4), %%eax\n"
 		"21: movl 4(%4), %%edx\n"
-		" movnti %%eax, 0(%3)\n"
-		" movnti %%edx, 4(%3)\n"
+		" movnti %%eax, %%es:0(%3)\n"
+		" movnti %%edx, %%es:4(%3)\n"
 		"3: movl 8(%4), %%eax\n"
 		"31: movl 12(%4),%%edx\n"
-		" movnti %%eax, 8(%3)\n"
-		" movnti %%edx, 12(%3)\n"
+		" movnti %%eax, %%es:8(%3)\n"
+		" movnti %%edx, %%es:12(%3)\n"
 		"4: movl 16(%4), %%eax\n"
 		"41: movl 20(%4), %%edx\n"
-		" movnti %%eax, 16(%3)\n"
-		" movnti %%edx, 20(%3)\n"
+		" movnti %%eax, %%es:16(%3)\n"
+		" movnti %%edx, %%es:20(%3)\n"
 		"10: movl 24(%4), %%eax\n"
 		"51: movl 28(%4), %%edx\n"
-		" movnti %%eax, 24(%3)\n"
-		" movnti %%edx, 28(%3)\n"
+		" movnti %%eax, %%es:24(%3)\n"
+		" movnti %%edx, %%es:28(%3)\n"
 		"11: movl 32(%4), %%eax\n"
 		"61: movl 36(%4), %%edx\n"
-		" movnti %%eax, 32(%3)\n"
-		" movnti %%edx, 36(%3)\n"
+		" movnti %%eax, %%es:32(%3)\n"
+		" movnti %%edx, %%es:36(%3)\n"
 		"12: movl 40(%4), %%eax\n"
 		"71: movl 44(%4), %%edx\n"
-		" movnti %%eax, 40(%3)\n"
-		" movnti %%edx, 44(%3)\n"
+		" movnti %%eax, %%es:40(%3)\n"
+		" movnti %%edx, %%es:44(%3)\n"
 		"13: movl 48(%4), %%eax\n"
 		"81: movl 52(%4), %%edx\n"
-		" movnti %%eax, 48(%3)\n"
-		" movnti %%edx, 52(%3)\n"
+		" movnti %%eax, %%es:48(%3)\n"
+		" movnti %%edx, %%es:52(%3)\n"
 		"14: movl 56(%4), %%eax\n"
 		"91: movl 60(%4), %%edx\n"
-		" movnti %%eax, 56(%3)\n"
-		" movnti %%edx, 60(%3)\n"
+		" movnti %%eax, %%es:56(%3)\n"
+		" movnti %%edx, %%es:60(%3)\n"
 		" addl $-64, %0\n"
 		" addl $64, %4\n"
 		" addl $64, %3\n"
@@ -586,6 +719,8 @@
 		" movl %%eax,%0\n"
 		"7: rep; movsb\n"
 		"8:\n"
+		" pushl %%ss\n"
+		" popl %%ds\n"
 		".section .fixup,\"ax\"\n"
 		"9: lea 0(%%eax,%0,4),%0\n"
 		"16: jmp 8b\n"
@@ -614,7 +749,7 @@
 		" .long 7b,16b\n"
 		".previous"
 		: "=&c"(size), "=&D" (d0), "=&S" (d1)
-		: "1"(to), "2"(from), "0"(size)
+		: "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
 		: "eax", "edx", "memory");
 	return size;
 }
@@ -627,90 +762,146 @@
  */
 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
-unsigned long __copy_user_intel(void __user *to, const void *from,
+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
+					unsigned long size);
+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
 					unsigned long size);
 unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy. */
-#define __copy_user(to,from,size) \
-do { \
-	int __d0, __d1, __d2; \
-	__asm__ __volatile__( \
-		" cmp $7,%0\n" \
-		" jbe 1f\n" \
-		" movl %1,%0\n" \
-		" negl %0\n" \
-		" andl $7,%0\n" \
-		" subl %0,%3\n" \
-		"4: rep; movsb\n" \
-		" movl %3,%0\n" \
-		" shrl $2,%0\n" \
-		" andl $3,%3\n" \
-		" .align 2,0x90\n" \
-		"0: rep; movsl\n" \
-		" movl %3,%0\n" \
-		"1: rep; movsb\n" \
-		"2:\n" \
-		".section .fixup,\"ax\"\n" \
-		"5: addl %3,%0\n" \
-		" jmp 2b\n" \
-		"3: lea 0(%3,%0,4),%0\n" \
-		" jmp 2b\n" \
-		".previous\n" \
-		".section __ex_table,\"a\"\n" \
-		" .align 4\n" \
-		" .long 4b,5b\n" \
-		" .long 0b,3b\n" \
-		" .long 1b,2b\n" \
-		".previous" \
-		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
-		: "3"(size), "0"(size), "1"(to), "2"(from) \
-		: "memory"); \
-} while (0)
-
-#define __copy_user_zeroing(to,from,size) \
-do { \
-	int __d0, __d1, __d2; \
-	__asm__ __volatile__( \
-		" cmp $7,%0\n" \
-		" jbe 1f\n" \
-		" movl %1,%0\n" \
-		" negl %0\n" \
-		" andl $7,%0\n" \
-		" subl %0,%3\n" \
-		"4: rep; movsb\n" \
-		" movl %3,%0\n" \
-		" shrl $2,%0\n" \
-		" andl $3,%3\n" \
-		" .align 2,0x90\n" \
-		"0: rep; movsl\n" \
-		" movl %3,%0\n" \
-		"1: rep; movsb\n" \
-		"2:\n" \
-		".section .fixup,\"ax\"\n" \
-		"5: addl %3,%0\n" \
-		" jmp 6f\n" \
-		"3: lea 0(%3,%0,4),%0\n" \
-		"6: pushl %0\n" \
-		" pushl %%eax\n" \
-		" xorl %%eax,%%eax\n" \
-		" rep; stosb\n" \
-		" popl %%eax\n" \
-		" popl %0\n" \
-		" jmp 2b\n" \
-		".previous\n" \
-		".section __ex_table,\"a\"\n" \
-		" .align 4\n" \
-		" .long 4b,5b\n" \
-		" .long 0b,3b\n" \
-		" .long 1b,6b\n" \
-		".previous" \
-		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
-		: "3"(size), "0"(size), "1"(to), "2"(from) \
-		: "memory"); \
-} while (0)
+static unsigned long
+__generic_copy_to_user(void __user *to, const void *from, unsigned long size)
+{
+	int __d0, __d1, __d2;
+
+	__asm__ __volatile__(
+		" movw %w8,%%es\n"
+		" cmp $7,%0\n"
+		" jbe 1f\n"
+		" movl %1,%0\n"
+		" negl %0\n"
+		" andl $7,%0\n"
+		" subl %0,%3\n"
+		"4: rep; movsb\n"
+		" movl %3,%0\n"
+		" shrl $2,%0\n"
+		" andl $3,%3\n"
+		" .align 2,0x90\n"
+		"0: rep; movsl\n"
+		" movl %3,%0\n"
+		"1: rep; movsb\n"
+		"2:\n"
+		" pushl %%ss\n"
+		" popl %%es\n"
+		".section .fixup,\"ax\"\n"
+		"5: addl %3,%0\n"
+		" jmp 2b\n"
+		"3: lea 0(%3,%0,4),%0\n"
+		" jmp 2b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 4b,5b\n"
+		" .long 0b,3b\n"
+		" .long 1b,2b\n"
+		".previous"
+		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
+		: "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
+		: "memory");
+	return size;
+}
+
+static unsigned long
+__generic_copy_from_user(void *to, const void __user *from, unsigned long size)
+{
+	int __d0, __d1, __d2;
+
+	__asm__ __volatile__(
+		" movw %w8,%%ds\n"
+		" cmp $7,%0\n"
+		" jbe 1f\n"
+		" movl %1,%0\n"
+		" negl %0\n"
+		" andl $7,%0\n"
+		" subl %0,%3\n"
+		"4: rep; movsb\n"
+		" movl %3,%0\n"
+		" shrl $2,%0\n"
+		" andl $3,%3\n"
+		" .align 2,0x90\n"
+		"0: rep; movsl\n"
+		" movl %3,%0\n"
+		"1: rep; movsb\n"
+		"2:\n"
+		" pushl %%ss\n"
+		" popl %%ds\n"
+		".section .fixup,\"ax\"\n"
+		"5: addl %3,%0\n"
+		" jmp 2b\n"
+		"3: lea 0(%3,%0,4),%0\n"
+		" jmp 2b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 4b,5b\n"
+		" .long 0b,3b\n"
+		" .long 1b,2b\n"
+		".previous"
+		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
+		: "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
+		: "memory");
+	return size;
+}
+
+static unsigned long
+__copy_user_zeroing(void *to, const void __user *from, unsigned long size)
+{
+	int __d0, __d1, __d2;
+
+	__asm__ __volatile__(
+		" movw %w8,%%ds\n"
+		" cmp $7,%0\n"
+		" jbe 1f\n"
+		" movl %1,%0\n"
+		" negl %0\n"
+		" andl $7,%0\n"
+		" subl %0,%3\n"
+		"4: rep; movsb\n"
+		" movl %3,%0\n"
+		" shrl $2,%0\n"
+		" andl $3,%3\n"
+		" .align 2,0x90\n"
+		"0: rep; movsl\n"
+		" movl %3,%0\n"
+		"1: rep; movsb\n"
+		"2:\n"
+		" pushl %%ss\n"
+		" popl %%ds\n"
+		".section .fixup,\"ax\"\n"
+		"5: addl %3,%0\n"
+		" jmp 6f\n"
+		"3: lea 0(%3,%0,4),%0\n"
+		"6: pushl %0\n"
+		" pushl %%eax\n"
+		" xorl %%eax,%%eax\n"
+		" rep; stosb\n"
+		" popl %%eax\n"
+		" popl %0\n"
+		" jmp 2b\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 4b,5b\n"
+		" .long 0b,3b\n"
+		" .long 1b,6b\n"
+		".previous"
+		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
+		: "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
+		: "memory");
+	return size;
+}
 
 unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
@@ -766,9 +957,9 @@
 	}
 #endif
 	if (movsl_is_ok(to, from, n))
-		__copy_user(to, from, n);
+		n = __generic_copy_to_user(to, from, n);
 	else
-		n = __copy_user_intel(to, from, n);
+		n = __generic_copy_to_user_intel(to, from, n);
 	return n;
 }
 EXPORT_SYMBOL(__copy_to_user_ll);
@@ -778,7 +969,7 @@
 {
 	BUG_ON((long)n < 0);
 	if (movsl_is_ok(to, from, n))
-		__copy_user_zeroing(to, from, n);
+		n = __copy_user_zeroing(to, from, n);
 	else
 		n = __copy_user_zeroing_intel(to, from, n);
 	return n;
@@ -790,10 +981,9 @@
 {
 	BUG_ON((long)n < 0);
 	if (movsl_is_ok(to, from, n))
-		__copy_user(to, from, n);
+		n = __generic_copy_from_user(to, from, n);
 	else
-		n = __copy_user_intel((void __user *)to,
-				      (const void *)from, n);
+		n = __generic_copy_from_user_intel(to, from, n);
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
@@ -804,11 +994,11 @@
 	BUG_ON((long)n < 0);
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if ( n > 64 && cpu_has_xmm2)
-		n = __copy_user_zeroing_intel_nocache(to, from, n);
+		n = __copy_user_zeroing_intel_nocache(to, from, n);
 	else
-		__copy_user_zeroing(to, from, n);
+		n = __copy_user_zeroing(to, from, n);
 #else
-	__copy_user_zeroing(to, from, n);
+	n = __copy_user_zeroing(to, from, n);
 #endif
 	return n;
 }
@@ -819,11 +1009,11 @@
 	BUG_ON((long)n < 0);
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if ( n > 64 && cpu_has_xmm2)
-		n = __copy_user_intel_nocache(to, from, n);
+		n = __copy_user_intel_nocache(to, from, n);
 	else
-		__copy_user(to, from, n);
+		n = __generic_copy_from_user(to, from, n);
 #else
-	__copy_user(to, from, n);
+	n = __generic_copy_from_user(to, from, n);
 #endif
 	return n;
 }
@@ -878,3 +1068,44 @@
 	return n;
 }
 EXPORT_SYMBOL(copy_from_user);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+void __set_fs(mm_segment_t x, int cpu)
+{
+	unsigned long limit = x.seg;
+	__u32 a, b;
+
+	current_thread_info()->addr_limit = x;
+	if (likely(limit))
+		limit -= 1UL;
+
+	pack_descriptor(&a, &b, 0UL, limit, 0xF3, 0xC);
+	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, a, b);
+}
+
+void set_fs(mm_segment_t x)
+{
+	int cpu = get_cpu();
+
+#ifdef CONFIG_PAX_KERNEXEC
+	unsigned long cr0;
+
+	pax_open_kernel(cr0);
+#endif
+
+	__set_fs(x, cpu);
+
+#ifdef CONFIG_PAX_KERNEXEC
+	pax_close_kernel(cr0);
+#endif
+
+	put_cpu_no_resched();
+}
+#else
+void set_fs(mm_segment_t x)
+{
+	current_thread_info()->addr_limit = x;
+}
+#endif
+
+EXPORT_SYMBOL(set_fs);
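Note (not part of the patch): every hunk above applies the same PaX UDEREF idiom — load %ds or %es with __USER_DS before touching userland, perform the access through that segment, then restore the register from %ss (the kernel data segment), while set_fs() keeps the __USER_DS descriptor limit in the GDT in step with addr_limit. The fragment below is a minimal, hypothetical illustration of that idiom on an i386 kernel with this patch applied; the helper name is made up, and real code would still need access_ok() and __ex_table fixups exactly as in the functions above.

/* Illustrative sketch only: clear `size` bytes at a user address through
 * %es loaded with __USER_DS, then restore %es from %ss, mirroring the
 * idiom used by __do_clear_user() in this patch.  No fault handling. */
static unsigned long uderef_clear_bytes(void __user *addr, unsigned long size)
{
	int d0;

	__asm__ __volatile__(
		" movw %w3,%%es\n"	/* select the userland segment */
		" rep; stosb\n"		/* stosb stores through %es:(%edi) */
		" pushl %%ss\n"		/* restore %es to the kernel segment */
		" popl %%es\n"
		: "=&c" (size), "=&D" (d0)
		: "0" (size), "r" (__USER_DS), "1" (addr), "a" (0)
		: "memory");
	return size;	/* 0 when the whole range was written */
}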