--- zzzz-none-000/linux-3.10.107/arch/mips/include/asm/uaccess.h 2017-06-27 09:49:32.000000000 +0000 +++ vr9-7490-729/linux-3.10.107/arch/mips/include/asm/uaccess.h 2021-11-10 11:53:54.000000000 +0000 @@ -224,47 +224,96 @@ * for 32 bit mode and old iron. */ #ifdef CONFIG_32BIT +#define __GET_KERNEL_DW(val, ptr) __get_kernel_asm_ll32(val, ptr) #define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr) #endif #ifdef CONFIG_64BIT -#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr) +#define __GET_KERNEL_DW(val, ptr) __get_kernel_asm(val, "ld", ptr) #endif +extern void __get_kernel_unknown(void); extern void __get_user_unknown(void); -#define __get_user_common(val, size, ptr) \ +#define __get_kernel_common(val, size, ptr) \ do { \ + __chk_user_ptr(ptr); \ + __gu_err = 0; \ + switch (size) { \ + case 1: __get_kernel_asm(val, "lb", ptr); break; \ + case 2: __get_kernel_asm(val, "lh", ptr); break; \ + case 4: __get_kernel_asm(val, "lw", ptr); break; \ + case 8: __GET_KERNEL_DW(val, ptr); break; \ + default: __get_kernel_unknown(); break; \ + } \ +} while (0) + +#ifdef CONFIG_EVA +#define __get_user_common(val, size, ptr) \ +do { \ + __gu_err = 0; \ switch (size) { \ - case 1: __get_user_asm(val, "lb", ptr); break; \ - case 2: __get_user_asm(val, "lh", ptr); break; \ - case 4: __get_user_asm(val, "lw", ptr); break; \ - case 8: __GET_USER_DW(val, ptr); break; \ - default: __get_user_unknown(); break; \ + case 1: __get_user_asm(val, "lbe", ptr); break; \ + case 2: __get_user_asm(val, "lhe", ptr); break; \ + case 4: __get_user_asm(val, "lwe", ptr); break; \ + case 8: __GET_USER_DW(val, ptr); break; \ + default: __get_user_unknown(); break; \ } \ } while (0) +#endif -#define __get_user_nocheck(x, ptr, size) \ +#ifndef CONFIG_EVA +#define __get_user_nocheck(x, ptr, size) \ ({ \ - int __gu_err; \ + int __gu_err; \ + __get_kernel_common((x), size, ptr); \ + __gu_err; \ +}) +#else +#define __get_user_nocheck(x, ptr, size) \ +({ \ + int __gu_err; \ + const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ \ - __chk_user_ptr(ptr); \ - __get_user_common((x), size, ptr); \ + if (segment_eq(get_fs(), KERNEL_DS)) \ + __get_kernel_common((x), size, __gu_ptr); \ + else { \ + __chk_user_ptr(ptr); \ + __get_user_common((x), size, __gu_ptr); \ + } \ __gu_err; \ }) +#endif -#define __get_user_check(x, ptr, size) \ +#ifndef CONFIG_EVA +#define __get_user_check(x, ptr, size) \ ({ \ int __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ \ - might_fault(); \ - if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ - __get_user_common((x), size, __gu_ptr); \ + might_fault(); \ + if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ + __get_kernel_common((x), size, __gu_ptr); \ + \ + __gu_err; \ +}) +#else +#define __get_user_check(x, ptr, size) \ +({ \ + int __gu_err = -EFAULT; \ + const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ \ + if (segment_eq(get_fs(), KERNEL_DS)) { \ + __get_kernel_common((x), size, __gu_ptr); \ + } else { \ + might_fault(); \ + if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ + __get_user_common((x), size, __gu_ptr); \ + } \ __gu_err; \ }) +#endif -#define __get_user_asm(val, insn, addr) \ +#define __get_kernel_asm(val, insn, addr) \ { \ long __gu_tmp; \ \ @@ -285,10 +334,36 @@ (val) = (__typeof__(*(addr))) __gu_tmp; \ } +#ifdef CONFIG_EVA +#define __get_user_asm(val, insn, addr) \ +{ \ + long __gu_tmp; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set eva \n" \ + "1: " insn " %1, 0(%3) \n" \ + "2: \n" \ + " .insn \n" \ + " .section 
.fixup,\"ax\" \n" \ + "3: li %0, %4 \n" \ + " j 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " "__UA_ADDR "\t1b, 3b \n" \ + " .previous \n" \ + " .set pop \n" \ + : "=r" (__gu_err), "=r" (__gu_tmp) \ + : "0" (0), "r" (addr), "i" (-EFAULT)); \ + \ + (val) = (__typeof__(*(addr))) __gu_tmp; \ +} +#endif + /* * Get a long long 64 using 32 bit registers. */ -#define __get_user_asm_ll32(val, addr) \ +#define __get_kernel_asm_ll32(val, addr) \ { \ union { \ unsigned long long l; \ @@ -315,18 +390,115 @@ \ (val) = __gu_tmp.t; \ } +#ifdef CONFIG_EVA +#define __get_user_asm_ll32(val, addr) \ +{ \ + union { \ + unsigned long long l; \ + __typeof__(*(addr)) t; \ + } __gu_tmp; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set eva \n" \ + "1: lwe %1, (%3) \n" \ + "2: lwe %D1, 4(%3) \n" \ + "3: \n" \ + " .insn \n" \ + " .section .fixup,\"ax\" \n" \ + "4: li %0, %4 \n" \ + " move %1, $0 \n" \ + " move %D1, $0 \n" \ + " j 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 4b \n" \ + " " __UA_ADDR " 2b, 4b \n" \ + " .previous \n" \ + " .set pop \n" \ + : "=r" (__gu_err), "=&r" (__gu_tmp.l) \ + : "0" (0), "r" (addr), "i" (-EFAULT)); \ + \ + (val) = __gu_tmp.t; \ +} +#endif + /* * Yuck. We need two variants, one for 64bit operation and one * for 32 bit mode and old iron. */ #ifdef CONFIG_32BIT +#define __PUT_KERNEL_DW(ptr) __put_kernel_asm_ll32(ptr) #define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr) #endif #ifdef CONFIG_64BIT -#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr) +#define __PUT_KERNEL_DW(ptr) __put_kernel_asm("sd", ptr) #endif +extern void __put_kernel_unknown(void); + +#ifdef CONFIG_EVA +extern void __put_user_unknown(void); + +#define __put_kernel_common(size, ptr) \ +do { \ + switch (size) { \ + case 1: __put_kernel_asm("sb", ptr); break; \ + case 2: __put_kernel_asm("sh", ptr); break; \ + case 4: __put_kernel_asm("sw", ptr); break; \ + case 8: __PUT_KERNEL_DW(ptr); break; \ + default: __put_kernel_unknown(); break; \ + } \ +} while (0) + +#define __put_user_common(size, ptr) \ +do { \ + switch (size) { \ + case 1: __put_user_asm("sbe", ptr); break; \ + case 2: __put_user_asm("she", ptr); break; \ + case 4: __put_user_asm("swe", ptr); break; \ + case 8: __PUT_USER_DW(ptr); break; \ + default: __put_user_unknown(); break; \ + } \ +} while (0) + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + __typeof__(*(ptr)) __pu_val; \ + int __pu_err = 0; \ + const __typeof__(*(ptr)) __user * __pu_ptr = (ptr); \ + \ + if (segment_eq(get_fs(), KERNEL_DS)) { \ + __chk_user_ptr(__pu_ptr); \ + __pu_val = (x); \ + __put_kernel_common(size, __pu_ptr); \ + } else { \ + __chk_user_ptr(__pu_ptr); \ + __pu_val = (x); \ + __put_user_common(size, __pu_ptr); \ + } \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + __typeof__(*(ptr)) __pu_val = (x); \ + int __pu_err = -EFAULT; \ + const __typeof__(*(ptr)) __user * __pu_ptr = (ptr); \ + \ + if (segment_eq(get_fs(), KERNEL_DS)) \ + __put_kernel_common(size, __pu_ptr); \ + else { \ + might_fault(); \ + if (likely(access_ok(VERIFY_WRITE, __pu_ptr, size))) \ + __put_user_common(size, __pu_ptr); \ + } \ + __pu_err; \ +}) + +#else + #define __put_user_nocheck(x, ptr, size) \ ({ \ __typeof__(*(ptr)) __pu_val; \ @@ -335,11 +507,11 @@ __chk_user_ptr(ptr); \ __pu_val = (x); \ switch (size) { \ - case 1: __put_user_asm("sb", ptr); break; \ - case 2: __put_user_asm("sh", ptr); break; \ - case 4: __put_user_asm("sw", ptr); break; \ - case 8: __PUT_USER_DW(ptr); break; \ - default: 
__put_user_unknown(); break; \ + case 1: __put_kernel_asm("sb", ptr); break; \ + case 2: __put_kernel_asm("sh", ptr); break; \ + case 4: __put_kernel_asm("sw", ptr); break; \ + case 8: __PUT_KERNEL_DW(ptr); break; \ + default: __put_kernel_unknown(); break; \ } \ __pu_err; \ }) @@ -353,17 +525,19 @@ might_fault(); \ if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ switch (size) { \ - case 1: __put_user_asm("sb", __pu_addr); break; \ - case 2: __put_user_asm("sh", __pu_addr); break; \ - case 4: __put_user_asm("sw", __pu_addr); break; \ - case 8: __PUT_USER_DW(__pu_addr); break; \ - default: __put_user_unknown(); break; \ + case 1: __put_kernel_asm("sb", __pu_addr); break; \ + case 2: __put_kernel_asm("sh", __pu_addr); break; \ + case 4: __put_kernel_asm("sw", __pu_addr); break; \ + case 8: __PUT_KERNEL_DW(__pu_addr); break; \ + default: __put_kernel_unknown(); break; \ } \ } \ __pu_err; \ }) +#endif /* CONFIG_EVA */ -#define __put_user_asm(insn, ptr) \ +#ifndef CONFIG_EVA +#define __put_kernel_asm(insn, ptr) \ { \ __asm__ __volatile__( \ "1: " insn " %z2, %3 # __put_user_asm\n" \ @@ -380,8 +554,49 @@ : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \ "i" (-EFAULT)); \ } +#else +#define __put_kernel_asm(insn, ptr) \ +{ \ + __asm__ __volatile__( \ + "1: " insn " %2, %3 # __put_user_asm\n" \ + "2: \n" \ + " .insn \n" \ + " .section .fixup,\"ax\" \n" \ + "3: li %0, %4 \n" \ + " j 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 3b \n" \ + " .previous \n" \ + : "=r" (__pu_err) \ + : "0" (0), "r" (__pu_val), "o" (__m(ptr)), \ + "i" (-EFAULT)); \ +} + +#define __put_user_asm(insn, ptr) \ +{ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set eva \n" \ + "1: " insn " %2, 0(%3) # __put_user_asm\n" \ + "2: \n" \ + " .insn \n" \ + " .section .fixup,\"ax\" \n" \ + "3: li %0, %4 \n" \ + " j 2b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 3b \n" \ + " .previous \n" \ + " .set pop \n" \ + : "=r" (__pu_err) \ + : "0" (0), "r" (__pu_val), "r" (ptr), \ + "i" (-EFAULT)); \ +} +#endif + -#define __put_user_asm_ll32(ptr) \ +#define __put_kernel_asm_ll32(ptr) \ { \ __asm__ __volatile__( \ "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ @@ -401,8 +616,32 @@ "i" (-EFAULT)); \ } -extern void __put_user_unknown(void); +#ifdef CONFIG_EVA +#define __put_user_asm_ll32(ptr) \ +{ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set eva \n" \ + "1: swe %2, (%3) # __put_user_asm_ll32 \n" \ + "2: swe %D2, 4(%3) \n" \ + "3: \n" \ + " .insn \n" \ + " .section .fixup,\"ax\" \n" \ + "4: li %0, %4 \n" \ + " j 3b \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " " __UA_ADDR " 1b, 4b \n" \ + " " __UA_ADDR " 2b, 4b \n" \ + " .previous" \ + " .set pop \n" \ + : "=r" (__pu_err) \ + : "0" (0), "r" (__pu_val), "r" (ptr), \ + "i" (-EFAULT)); \ +} +#endif +#ifndef CONFIG_EVA /* * put_user_unaligned: - Write a simple value into user space. * @x: Value to copy to user space. 
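A minimal usage sketch of the accessor pattern introduced above, assuming <linux/uaccess.h> and <linux/types.h> are included; read_req_flags() and its arguments are illustrative names only, not anything this header defines. With CONFIG_EVA, the checked get_user()/put_user() paths first test segment_eq(get_fs(), KERNEL_DS): kernel-segment callers keep the ordinary lb/lh/lw/sb/sh/sw forms, while real user pointers go through the EVA lbe/lhe/lwe/sbe/she/swe variants.

/* Illustration only: one checked read through the macros patched above. */
static int read_req_flags(u32 __user *uptr, u32 *out)
{
	u32 flags;

	/* Expands to the lwe-based __get_user_common() for a user pointer,
	 * or to __get_kernel_common() (plain lw) after set_fs(KERNEL_DS). */
	if (get_user(flags, uptr))
		return -EFAULT;

	*out = flags;
	return 0;
}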
@@ -671,6 +910,8 @@ extern void __put_user_unaligned_unknown(void); +#endif /* CONFIG_EVA */ + /* * We're generating jump to subroutines which will be outside the range of * jump instructions @@ -693,8 +934,12 @@ #endif extern size_t __copy_user(void *__to, const void *__from, size_t __n); +#ifdef CONFIG_EVA +extern size_t __copy_fromuser(void *__to, const void *__from, size_t __n); +extern size_t __copy_touser(void *__to, const void *__from, size_t __n); +#endif -#define __invoke_copy_to_user(to, from, n) \ +#define __invoke_copy_to_kernel(to, from, n) \ ({ \ register void __user *__cu_to_r __asm__("$4"); \ register const void *__cu_from_r __asm__("$5"); \ @@ -704,7 +949,7 @@ __cu_from_r = (from); \ __cu_len_r = (n); \ __asm__ __volatile__( \ - __MODULE_JAL(__copy_user) \ + __MODULE_JAL(__copy_user) \ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ @@ -712,6 +957,26 @@ __cu_len_r; \ }) +#ifdef CONFIG_EVA +#define __invoke_copy_to_user(to, from, n) \ +({ \ + register void __user *__cu_to_r __asm__("$4"); \ + register const void *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + __MODULE_JAL(__copy_touser) \ + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) +#endif + /* * __copy_to_user: - Copy a block of data into user space, with less checking. * @to: Destination address, in user space. @@ -726,6 +991,7 @@ * Returns number of bytes that could not be copied. * On success, this will be zero. */ +#ifndef CONFIG_EVA #define __copy_to_user(to, from, n) \ ({ \ void __user *__cu_to; \ @@ -735,13 +1001,58 @@ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ - might_fault(); \ - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ + might_fault(); \ + __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, __cu_len); \ __cu_len; \ }) +#else +#define __copy_to_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + if (segment_eq(get_fs(), KERNEL_DS)) \ + __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, __cu_len); \ + else { \ + might_fault(); \ + __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ + } \ + __cu_len; \ +}) +#endif -extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); +#ifndef CONFIG_EVA +#define __copy_to_user_inatomic(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, __cu_len); \ + __cu_len; \ +}) +#define __copy_from_user_inatomic(to, from, n) \ +({ \ + void *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, __cu_from, \ + __cu_len); \ + __cu_len; \ +}) +#else #define __copy_to_user_inatomic(to, from, n) \ ({ \ void __user *__cu_to; \ @@ -751,7 +1062,10 @@ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ + if (segment_eq(get_fs(), KERNEL_DS)) \ + __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, 
__cu_len); \ + else \ + __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ __cu_len; \ }) @@ -764,10 +1078,15 @@ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ - __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \ - __cu_len); \ + if (segment_eq(get_fs(), KERNEL_DS)) \ + __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, __cu_from, \ + __cu_len); \ + else \ + __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \ + __cu_len); \ __cu_len; \ }) +#endif /* * copy_to_user: - Copy a block of data into user space. @@ -782,6 +1101,24 @@ * Returns number of bytes that could not be copied. * On success, this will be zero. */ +#ifndef CONFIG_EVA +#define copy_to_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ + might_fault(); \ + __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ + __cu_len); \ + } \ + __cu_len; \ +}) +#else #define copy_to_user(to, from, n) \ ({ \ void __user *__cu_to; \ @@ -791,15 +1128,70 @@ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ - if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ - might_fault(); \ - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ - __cu_len); \ - } \ + if (segment_eq(get_fs(), KERNEL_DS)) \ + __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ + __cu_len); \ + else \ + if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ + might_fault(); \ + __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ + __cu_len); \ + } \ __cu_len; \ }) +#endif + +#define __invoke_copy_from_kernel(to, from, n) \ +({ \ + register void *__cu_to_r __asm__("$4"); \ + register const void __user *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + ".set\tnoreorder\n\t" \ + __MODULE_JAL(__copy_user) \ + ".set\tnoat\n\t" \ + __UA_ADDU "\t$1, %1, %2\n\t" \ + ".set\tat\n\t" \ + ".set\treorder" \ + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) + +#ifdef CONFIG_EVA +#define __invoke_copy_from_user(to, from, n) \ +({ \ + register void *__cu_to_r __asm__("$4"); \ + register const void __user *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + ".set\tnoreorder\n\t" \ + __MODULE_JAL(__copy_fromuser) \ + ".set\tnoat\n\t" \ + __UA_ADDU "\t$1, %1, %2\n\t" \ + ".set\tat\n\t" \ + ".set\treorder" \ + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) +#endif + +extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); -#define __invoke_copy_from_user(to, from, n) \ +#define __invoke_copy_from_kernel_inatomic(to, from, n) \ ({ \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ @@ -810,7 +1202,7 @@ __cu_len_r = (n); \ __asm__ __volatile__( \ ".set\tnoreorder\n\t" \ - __MODULE_JAL(__copy_user) \ + __MODULE_JAL(__copy_user_inatomic) \ ".set\tnoat\n\t" \ __UA_ADDU "\t$1, %1, %2\n\t" \ ".set\tat\n\t" \ @@ -822,6 +1214,9 @@ __cu_len_r; \ }) +#ifdef CONFIG_EVA +extern size_t 
__copy_fromuser_inatomic(void *__to, const void *__from, size_t __n); + #define __invoke_copy_from_user_inatomic(to, from, n) \ ({ \ register void *__cu_to_r __asm__("$4"); \ @@ -833,7 +1228,7 @@ __cu_len_r = (n); \ __asm__ __volatile__( \ ".set\tnoreorder\n\t" \ - __MODULE_JAL(__copy_user_inatomic) \ + __MODULE_JAL(__copy_fromuser_inatomic) \ ".set\tnoat\n\t" \ __UA_ADDU "\t$1, %1, %2\n\t" \ ".set\tat\n\t" \ @@ -845,6 +1240,32 @@ __cu_len_r; \ }) +extern size_t __copy_inuser(void *__to, const void *__from, size_t __n); + +#define __invoke_copy_in_user(to, from, n) \ +({ \ + register void *__cu_to_r __asm__("$4"); \ + register const void __user *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + ".set\tnoreorder\n\t" \ + __MODULE_JAL(__copy_inuser) \ + ".set\tnoat\n\t" \ + __UA_ADDU "\t$1, %1, %2\n\t" \ + ".set\tat\n\t" \ + ".set\treorder" \ + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) +#endif + /* * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. @@ -862,6 +1283,22 @@ * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. */ +#ifndef CONFIG_EVA +#define __copy_from_user(to, from, n) \ +({ \ + void *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + might_fault(); \ + __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \ + __cu_len); \ + __cu_len; \ +}) +#else #define __copy_from_user(to, from, n) \ ({ \ void *__cu_to; \ @@ -873,9 +1310,10 @@ __cu_len = (n); \ might_fault(); \ __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ + __cu_len); \ __cu_len; \ }) +#endif /* * copy_from_user: - Copy a block of data from user space. @@ -893,7 +1331,25 @@ * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. 
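 *
 * A minimal usage sketch (the struct and variable names here are
 * illustrative only, not defined anywhere in this header):
 *
 *	struct foo_args kargs;
 *
 *	if (copy_from_user(&kargs, uargs, sizeof(kargs)))
 *		return -EFAULT;		(some bytes were left uncopied)
 *
 * With CONFIG_EVA the definition that follows first tests for KERNEL_DS and
 * uses __invoke_copy_from_kernel(), otherwise it checks access_ok() and
 * calls __invoke_copy_from_user().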
*/ -#define copy_from_user(to, from, n) \ +#ifndef CONFIG_EVA +#define copy_from_user(to, from, n) \ +({ \ + void *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ + might_fault(); \ + __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \ + __cu_len); \ + } \ + __cu_len; \ +}) +#else +#define copy_from_user(to, from, n) \ ({ \ void *__cu_to; \ const void __user *__cu_from; \ @@ -902,14 +1358,53 @@ __cu_to = (to); \ __cu_from = (from); \ __cu_len = (n); \ - if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ - might_fault(); \ - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ - } \ + if (segment_eq(get_fs(), KERNEL_DS)) \ + __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \ + __cu_len); \ + else \ + if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ + might_fault(); \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ + __cu_len); \ + } \ __cu_len; \ }) +#endif +#ifndef CONFIG_EVA +#define __copy_in_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + might_fault(); \ + __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \ + __cu_len); \ + __cu_len; \ +}) + +#define copy_in_user(to, from, n) \ +({ \ + void __user *__cu_to; \ + const void __user *__cu_from; \ + long __cu_len; \ + \ + __cu_to = (to); \ + __cu_from = (from); \ + __cu_len = (n); \ + if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \ + access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \ + might_fault(); \ + __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \ + __cu_len); \ + } \ + __cu_len; \ +}) +#else #define __copy_in_user(to, from, n) \ ({ \ void __user *__cu_to; \ @@ -920,8 +1415,8 @@ __cu_from = (from); \ __cu_len = (n); \ might_fault(); \ - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ + __cu_len = __invoke_copy_in_user(__cu_to, __cu_from, \ + __cu_len); \ __cu_len; \ }) @@ -937,13 +1432,14 @@ if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \ access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \ might_fault(); \ - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ + __cu_len = __invoke_copy_in_user(__cu_to, __cu_from, \ + __cu_len); \ } else { \ memset(__cu_to, 0, __cu_len); \ } \ __cu_len; \ }) +#endif /* * __clear_user: - Zero a block of memory in user space, with less checking. @@ -966,7 +1462,11 @@ "move\t$4, %1\n\t" "move\t$5, $0\n\t" "move\t$6, %2\n\t" +#ifndef CONFIG_EVA __MODULE_JAL(__bzero) +#else + __MODULE_JAL(__bzero_user) +#endif "move\t%0, $6" : "=r" (res) : "r" (addr), "r" (size) @@ -1015,7 +1515,11 @@ "move\t$4, %1\n\t" "move\t$5, %2\n\t" "move\t$6, %3\n\t" +#ifndef CONFIG_EVA + __MODULE_JAL(__strncpy_from_kernel_nocheck_asm) +#else __MODULE_JAL(__strncpy_from_user_nocheck_asm) +#endif "move\t%0, $2" : "=r" (res) : "r" (__to), "r" (__from), "r" (__len) @@ -1042,6 +1546,7 @@ * If @count is smaller than the length of the string, copies @count bytes * and returns @count. 
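 *
 * A minimal usage sketch (buffer and pointer names are illustrative only):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;			(fault while copying)
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;		(truncated, no terminating NUL)
 *
 * With CONFIG_EVA the function body below calls __strncpy_from_kernel_asm
 * for a KERNEL_DS address limit and __strncpy_from_user_asm otherwise.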
*/ +#ifndef CONFIG_EVA static inline long strncpy_from_user(char *__to, const char __user *__from, long __len) { @@ -1052,6 +1557,37 @@ "move\t$4, %1\n\t" "move\t$5, %2\n\t" "move\t$6, %3\n\t" + __MODULE_JAL(__strncpy_from_kernel_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (__to), "r" (__from), "r" (__len) + : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); + + return res; +} +#else +static inline long +strncpy_from_user(char *__to, const char __user *__from, long __len) +{ + long res; + + if (segment_eq(get_fs(), KERNEL_DS)) { + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + "move\t$6, %3\n\t" + __MODULE_JAL(__strncpy_from_kernel_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (__to), "r" (__from), "r" (__len) + : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); + return res; + } + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + "move\t$6, %3\n\t" __MODULE_JAL(__strncpy_from_user_asm) "move\t%0, $2" : "=r" (res) @@ -1060,6 +1596,7 @@ return res; } +#endif /* Returns: 0 if bad, string length+1 (memory size) of string if ok */ static inline long __strlen_user(const char __user *s) @@ -1069,7 +1606,11 @@ might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" +#ifndef CONFIG_EVA + __MODULE_JAL(__strlen_kernel_nocheck_asm) +#else __MODULE_JAL(__strlen_user_nocheck_asm) +#endif "move\t%0, $2" : "=r" (res) : "r" (s) @@ -1099,7 +1640,11 @@ might_fault(); __asm__ __volatile__( "move\t$4, %1\n\t" +#ifndef CONFIG_EVA + __MODULE_JAL(__strlen_kernel_asm) +#else __MODULE_JAL(__strlen_user_asm) +#endif "move\t%0, $2" : "=r" (res) : "r" (s) @@ -1117,7 +1662,11 @@ __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" +#ifndef CONFIG_EVA + __MODULE_JAL(__strnlen_kernel_nocheck_asm) +#else __MODULE_JAL(__strnlen_user_nocheck_asm) +#endif "move\t%0, $2" : "=r" (res) : "r" (s), "r" (n) @@ -1140,6 +1689,7 @@ * If there is a limit on the length of a valid string, you may wish to * consider using strnlen_user() instead. */ +#ifndef CONFIG_EVA static inline long strnlen_user(const char __user *s, long n) { long res; @@ -1148,6 +1698,34 @@ __asm__ __volatile__( "move\t$4, %1\n\t" "move\t$5, %2\n\t" + __MODULE_JAL(__strnlen_kernel_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s), "r" (n) + : "$2", "$4", "$5", __UA_t0, "$31"); + + return res; +} +#else +static inline long strnlen_user(const char __user *s, long n) +{ + long res; + + if (segment_eq(get_fs(), KERNEL_DS)) { + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + __MODULE_JAL(__strnlen_kernel_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s), "r" (n) + : "$2", "$4", "$5", __UA_t0, "$31"); + return res; + } + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" __MODULE_JAL(__strnlen_user_asm) "move\t%0, $2" : "=r" (res) @@ -1156,6 +1734,7 @@ return res; } +#endif struct exception_table_entry {