--- zzzz-none-000/linux-3.10.107/arch/mips/lib/csum_partial.S 2017-06-27 09:49:32.000000000 +0000 +++ vr9-7490-729/linux-3.10.107/arch/mips/lib/csum_partial.S 2021-11-10 11:53:54.000000000 +0000 @@ -9,6 +9,17 @@ * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) 2007 Maciej W. Rozycki */ +/* + * Hack to resolve longstanding prefetch issue + * + * Prefetching may be fatal on some systems if we're prefetching beyond the + * end of memory on some systems. It's also a seriously bad idea on non + * dma-coherent systems. + */ +#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_MIPS_MALTA) +#undef CONFIG_CPU_HAS_PREFETCH +#endif + #include #include #include @@ -43,6 +54,8 @@ #define ADD daddu #define NBYTES 8 +#define LOADK ld + #else #define LOAD lw @@ -50,6 +63,8 @@ #define ADD addu #define NBYTES 4 +#define LOADK lw + #endif /* USE_DOUBLE */ #define UNIT(unit) ((unit)*NBYTES) @@ -417,12 +432,18 @@ * * If len < NBYTES use byte operations. */ - sltu t2, len, NBYTES + PREF( 0, 0(src) ) + PREF( 1, 0(dst) ) + sltu t2, len, NBYTES and t1, dst, ADDRMASK - bnez t2, .Lcopy_bytes_checklen + PREF( 0, 1*32(src) ) + PREF( 1, 1*32(dst) ) + bnez t2, .Lcopy_bytes_checklen and t0, src, ADDRMASK andi odd, dst, 0x1 /* odd buffer? */ - bnez t1, .Ldst_unaligned + PREF( 0, 2*32(src) ) + PREF( 1, 2*32(dst) ) + bnez t1, .Ldst_unaligned nop bnez t0, .Lsrc_unaligned_dst_aligned /* @@ -434,7 +455,9 @@ beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES nop SUB len, 8*NBYTES # subtract here for bgez loop - .align 4 + PREF( 0, 3*32(src) ) + PREF( 1, 3*32(dst) ) + .align 4 1: EXC( LOAD t0, UNIT(0)(src), .Ll_exc) EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) @@ -462,8 +485,10 @@ ADDC(sum, t6) EXC( STORE t7, UNIT(7)(dst), .Ls_exc) ADDC(sum, t7) - .set reorder /* DADDI_WAR */ + .set reorder /* DADDI_WAR */ ADD dst, dst, 8*NBYTES + PREF( 0, 8*32(src) ) + PREF( 1, 8*32(dst) ) bgez len, 1b .set noreorder ADD len, 8*NBYTES # revert len (see above) @@ -568,9 +593,11 @@ ADD src, src, t2 .Lsrc_unaligned_dst_aligned: - SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter + SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter + PREF( 0, 3*32(src) ) beqz t0, .Lcleanup_src_unaligned - and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES + and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES + PREF( 1, 3*32(dst) ) 1: /* * Avoid consecutive LD*'s to the same register since some mips @@ -587,7 +614,8 @@ EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy) EXC( LDREST t2, REST(2)(src), .Ll_exc_copy) EXC( LDREST t3, REST(3)(src), .Ll_exc_copy) - ADD src, src, 4*NBYTES + PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) + ADD src, src, 4*NBYTES #ifdef CONFIG_CPU_SB1 nop # improves slotting #endif @@ -600,7 +628,8 @@ EXC( STORE t3, UNIT(3)(dst), .Ls_exc) ADDC(sum, t3) .set reorder /* DADDI_WAR */ - ADD dst, dst, 4*NBYTES + PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) + ADD dst, dst, 4*NBYTES bne len, rem, 1b .set noreorder @@ -700,9 +729,9 @@ * * Assumes src < THREAD_BUADDR($28) */ - LOAD t0, TI_TASK($28) + LOADK t0, TI_TASK($28) li t2, SHIFT_START - LOAD t0, THREAD_BUADDR(t0) + LOADK t0, THREAD_BUADDR(t0) 1: EXC( lbu t1, 0(src), .Ll_exc) ADD src, src, 1 @@ -715,9 +744,9 @@ bne src, t0, 1b .set noreorder .Ll_exc: - LOAD t0, TI_TASK($28) + LOADK t0, TI_TASK($28) nop - LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address + LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUB len, AT, t0 # len number of uncopied bytes /* @@ -758,3 +787,738 @@ sw v1, (errptr) .set pop END(__csum_partial_copy_user) + + +#ifdef CONFIG_EVA + + 
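+/*
+ * EVA variants below: __csum_partial_copy_fromuser reads user memory with
+ * the EVA load instructions (lwe/lwle/lwre/lbue) and writes kernel memory
+ * with ordinary stores, while __csum_partial_copy_touser reads kernel
+ * memory with ordinary loads and writes user memory with swe/swle/swre/sbe.
+ */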
.set eva + +#undef LOAD +#undef LOADL +#undef LOADR +#undef STORE +#undef STOREL +#undef STORER +#undef LDFIRST +#undef LDREST +#undef STFIRST +#undef STREST +#undef COPY_BYTE + +#define LOAD lwe +#define LOADL lwle +#define LOADR lwre +#define STOREL swl +#define STORER swr +#define STORE sw + +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define LDFIRST LOADR +#define LDREST LOADL +#define STFIRST STORER +#define STREST STOREL +#else +#define LDFIRST LOADL +#define LDREST LOADR +#define STFIRST STOREL +#define STREST STORER +#endif + +LEAF(__csum_partial_copy_fromuser) + PTR_ADDU AT, src, len /* See (1) above. */ +#ifdef CONFIG_64BIT + move errptr, a4 +#else + lw errptr, 16(sp) +#endif + move sum, zero + move odd, zero + /* + * Note: dst & src may be unaligned, len may be 0 + * Temps + */ + /* + * The "issue break"s below are very approximate. + * Issue delays for dcache fills will perturb the schedule, as will + * load queue full replay traps, etc. + * + * If len < NBYTES use byte operations. + */ + PREFE( 0, 0(src) ) + PREF( 1, 0(dst) ) + sltu t2, len, NBYTES + and t1, dst, ADDRMASK + PREFE( 0, 1*32(src) ) + PREF( 1, 1*32(dst) ) + bnez t2, .LFcopy_bytes_checklen + and t0, src, ADDRMASK + andi odd, dst, 0x1 /* odd buffer? */ + PREFE( 0, 2*32(src) ) + PREF( 1, 2*32(dst) ) + bnez t1, .LFdst_unaligned + nop + bnez t0, .LFsrc_unaligned_dst_aligned + /* + * use delay slot for fall-through + * src and dst are aligned; need to compute rem + */ +.LFboth_aligned: + SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter + beqz t0, .LFcleanup_both_aligned # len < 8*NBYTES + nop + SUB len, 8*NBYTES # subtract here for bgez loop + PREFE( 0, 3*32(src) ) + PREF( 1, 3*32(dst) ) + .align 4 +1: +EXC( LOAD t0, UNIT(0)(src), .LFl_exc) +EXC( LOAD t1, UNIT(1)(src), .LFl_exc_copy) +EXC( LOAD t2, UNIT(2)(src), .LFl_exc_copy) +EXC( LOAD t3, UNIT(3)(src), .LFl_exc_copy) +EXC( LOAD t4, UNIT(4)(src), .LFl_exc_copy) +EXC( LOAD t5, UNIT(5)(src), .LFl_exc_copy) +EXC( LOAD t6, UNIT(6)(src), .LFl_exc_copy) +EXC( LOAD t7, UNIT(7)(src), .LFl_exc_copy) + SUB len, len, 8*NBYTES + ADD src, src, 8*NBYTES + STORE t0, UNIT(0)(dst) + ADDC(sum, t0) + STORE t1, UNIT(1)(dst) + ADDC(sum, t1) + STORE t2, UNIT(2)(dst) + ADDC(sum, t2) + STORE t3, UNIT(3)(dst) + ADDC(sum, t3) + STORE t4, UNIT(4)(dst) + ADDC(sum, t4) + STORE t5, UNIT(5)(dst) + ADDC(sum, t5) + STORE t6, UNIT(6)(dst) + ADDC(sum, t6) + STORE t7, UNIT(7)(dst) + ADDC(sum, t7) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 8*NBYTES + PREFE( 0, 8*32(src) ) + PREF( 1, 8*32(dst) ) + bgez len, 1b + .set noreorder + ADD len, 8*NBYTES # revert len (see above) + + /* + * len == the number of bytes left to copy < 8*NBYTES + */ +.LFcleanup_both_aligned: + beqz len, .LFdone + sltu t0, len, 4*NBYTES + bnez t0, .LFless_than_4units + and rem, len, (NBYTES-1) # rem = len % NBYTES + /* + * len >= 4*NBYTES + */ +EXC( LOAD t0, UNIT(0)(src), .LFl_exc) +EXC( LOAD t1, UNIT(1)(src), .LFl_exc_copy) +EXC( LOAD t2, UNIT(2)(src), .LFl_exc_copy) +EXC( LOAD t3, UNIT(3)(src), .LFl_exc_copy) + SUB len, len, 4*NBYTES + ADD src, src, 4*NBYTES + STORE t0, UNIT(0)(dst) + ADDC(sum, t0) + STORE t1, UNIT(1)(dst) + ADDC(sum, t1) + STORE t2, UNIT(2)(dst) + ADDC(sum, t2) + STORE t3, UNIT(3)(dst) + ADDC(sum, t3) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES + beqz len, .LFdone + .set noreorder +.LFless_than_4units: + /* + * rem = len % NBYTES + */ + beq rem, len, .LFcopy_bytes + nop +1: +EXC( LOAD t0, 0(src), .LFl_exc) + ADD src, src, NBYTES + SUB len, len, NBYTES + STORE t0, 0(dst) + ADDC(sum, t0) + .set reorder /* 
DADDI_WAR */ + ADD dst, dst, NBYTES + bne rem, len, 1b + .set noreorder + + /* + * src and dst are aligned, need to copy rem bytes (rem < NBYTES) + * A loop would do only a byte at a time with possible branch + * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE + * because can't assume read-access to dst. Instead, use + * STREST dst, which doesn't require read access to dst. + * + * This code should perform better than a simple loop on modern, + * wide-issue mips processors because the code has fewer branches and + * more instruction-level parallelism. + */ + beqz len, .LFdone + ADD t1, dst, len # t1 is just past last byte of dst + li bits, 8*NBYTES + SLL rem, len, 3 # rem = number of bits to keep +EXC( LOAD t0, 0(src), .LFl_exc) + SUB bits, bits, rem # bits = number of bits to discard + SHIFT_DISCARD t0, t0, bits + STREST t0, -1(t1) + SHIFT_DISCARD_REVERT t0, t0, bits + .set reorder + ADDC(sum, t0) + b .LFdone + .set noreorder +.LFdst_unaligned: + /* + * dst is unaligned + * t0 = src & ADDRMASK + * t1 = dst & ADDRMASK; T1 > 0 + * len >= NBYTES + * + * Copy enough bytes to align dst + * Set match = (src and dst have same alignment) + */ +EXC( LDFIRST t3, FIRST(0)(src), .LFl_exc) + ADD t2, zero, NBYTES +EXC( LDREST t3, REST(0)(src), .LFl_exc_copy) + SUB t2, t2, t1 # t2 = number of bytes copied + xor match, t0, t1 + STFIRST t3, FIRST(0)(dst) + SLL t4, t1, 3 # t4 = number of bits to discard + SHIFT_DISCARD t3, t3, t4 + /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ + ADDC(sum, t3) + beq len, t2, .LFdone + SUB len, len, t2 + ADD dst, dst, t2 + beqz match, .LFboth_aligned + ADD src, src, t2 + +.LFsrc_unaligned_dst_aligned: + SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter + PREFE( 0, 3*32(src) ) + beqz t0, .LFcleanup_src_unaligned + and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES + PREF( 1, 3*32(dst) ) +1: +/* + * Avoid consecutive LD*'s to the same register since some mips + * implementations can't issue them in the same cycle. + * It's OK to load FIRST(N+1) before REST(N) because the two addresses + * are to the same unit (unless src is aligned, but it's not). 
+ */ +EXC( LDFIRST t0, FIRST(0)(src), .LFl_exc) +EXC( LDFIRST t1, FIRST(1)(src), .LFl_exc_copy) + SUB len, len, 4*NBYTES +EXC( LDREST t0, REST(0)(src), .LFl_exc_copy) +EXC( LDREST t1, REST(1)(src), .LFl_exc_copy) +EXC( LDFIRST t2, FIRST(2)(src), .LFl_exc_copy) +EXC( LDFIRST t3, FIRST(3)(src), .LFl_exc_copy) +EXC( LDREST t2, REST(2)(src), .LFl_exc_copy) +EXC( LDREST t3, REST(3)(src), .LFl_exc_copy) + PREFE( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) + ADD src, src, 4*NBYTES +#ifdef CONFIG_CPU_SB1 + nop # improves slotting +#endif + STORE t0, UNIT(0)(dst) + ADDC(sum, t0) + STORE t1, UNIT(1)(dst) + ADDC(sum, t1) + STORE t2, UNIT(2)(dst) + ADDC(sum, t2) + STORE t3, UNIT(3)(dst) + ADDC(sum, t3) + PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES + bne len, rem, 1b + .set noreorder + +.LFcleanup_src_unaligned: + beqz len, .LFdone + and rem, len, NBYTES-1 # rem = len % NBYTES + beq rem, len, .LFcopy_bytes + nop +1: +EXC( LDFIRST t0, FIRST(0)(src), .LFl_exc) +EXC( LDREST t0, REST(0)(src), .LFl_exc_copy) + ADD src, src, NBYTES + SUB len, len, NBYTES + STORE t0, 0(dst) + ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES + bne len, rem, 1b + .set noreorder + +.LFcopy_bytes_checklen: + beqz len, .LFdone + nop +.LFcopy_bytes: + /* 0 < len < NBYTES */ + move t2, zero # partial word + li t3, SHIFT_START # shift +/* use .Ll_exc_copy here to return correct sum on fault */ +#define COPY_BYTE(N) \ +EXC( lbue t0, N(src), .LFl_exc_copy); \ + SUB len, len, 1; \ + sb t0, N(dst); \ + SLLV t0, t0, t3; \ + addu t3, SHIFT_INC; \ + beqz len, .LFcopy_bytes_done; \ + or t2, t0 + + COPY_BYTE(0) + COPY_BYTE(1) +#ifdef USE_DOUBLE + COPY_BYTE(2) + COPY_BYTE(3) + COPY_BYTE(4) + COPY_BYTE(5) +#endif +EXC( lbue t0, NBYTES-2(src), .LFl_exc_copy) + SUB len, len, 1 + sb t0, NBYTES-2(dst) + SLLV t0, t0, t3 + or t2, t0 +.LFcopy_bytes_done: + ADDC(sum, t2) +.LFdone: + /* fold checksum */ +#ifdef USE_DOUBLE + dsll32 v1, sum, 0 + daddu sum, v1 + sltu v1, sum, v1 + dsra32 sum, sum, 0 + addu sum, v1 +#endif + +#ifdef CPU_MIPSR2 + wsbh v1, sum + movn sum, v1, odd +#else + beqz odd, 1f /* odd buffer alignment? */ + lui v1, 0x00ff + addu v1, 0x00ff + and t0, sum, v1 + sll t0, t0, 8 + srl sum, sum, 8 + and sum, sum, v1 + or sum, sum, t0 +1: +#endif + .set reorder + ADDC32(sum, psum) + jr ra + .set noreorder + +.LFl_exc_copy: + /* + * Copy bytes from src until faulting load address (or until a + * lb faults) + * + * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) + * may be more than a byte beyond the last address. + * Hence, the lb below may get an exception. + * + * Assumes src < THREAD_BUADDR($28) + */ + LOADK t0, TI_TASK($28) + li t2, SHIFT_START + addi t0, t0, THREAD_BUADDR + LOADK t0, 0(t0) +1: +EXC( lbue t1, 0(src), .LFl_exc) + ADD src, src, 1 + sb t1, 0(dst) # can't fault -- we're copy_from_user + SLLV t1, t1, t2 + addu t2, SHIFT_INC + ADDC(sum, t1) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 1 + bne src, t0, 1b + .set noreorder +.LFl_exc: + LOADK t0, TI_TASK($28) + addi t0, t0, THREAD_BUADDR + LOADK t0, 0(t0) # t0 is just past last good address + SUB len, AT, t0 # len number of uncopied bytes + /* + * Here's where we rely on src and dst being incremented in tandem, + * See (3) above. + * dst += (fault addr - src) to put dst at first byte to clear + */ + ADD dst, t0 # compute start address in a1 + SUB dst, src + /* + * Clear len bytes starting at dst. Can't call __bzero because it + * might modify len. 
An inefficient loop for these rare times... + */ + .set reorder /* DADDI_WAR */ + SUB src, len, 1 + beqz len, .LFdone + .set noreorder +1: sb zero, 0(dst) + ADD dst, dst, 1 + .set push + .set noat +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS + bnez src, 1b + SUB src, src, 1 +#else + li v1, 1 + bnez src, 1b + SUB src, src, v1 +#endif + li v1, -EFAULT + b .LFdone + sw v1, (errptr) + + .set pop + END(__csum_partial_copy_fromuser) + + + +#undef LOAD +#undef LOADL +#undef LOADR +#undef STORE +#undef STOREL +#undef STORER +#undef LDFIRST +#undef LDREST +#undef STFIRST +#undef STREST +#undef COPY_BYTE + +#define LOAD lw +#define LOADL lwl +#define LOADR lwr +#define STOREL swle +#define STORER swre +#define STORE swe + +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define LDFIRST LOADR +#define LDREST LOADL +#define STFIRST STORER +#define STREST STOREL +#else +#define LDFIRST LOADL +#define LDREST LOADR +#define STFIRST STOREL +#define STREST STORER +#endif + +LEAF(__csum_partial_copy_touser) + PTR_ADDU AT, src, len /* See (1) above. */ +#ifdef CONFIG_64BIT + move errptr, a4 +#else + lw errptr, 16(sp) +#endif + move sum, zero + move odd, zero + /* + * Note: dst & src may be unaligned, len may be 0 + * Temps + */ + /* + * The "issue break"s below are very approximate. + * Issue delays for dcache fills will perturb the schedule, as will + * load queue full replay traps, etc. + * + * If len < NBYTES use byte operations. + */ + PREF( 0, 0(src) ) + PREFE( 1, 0(dst) ) + sltu t2, len, NBYTES + and t1, dst, ADDRMASK + PREF( 0, 1*32(src) ) + PREFE( 1, 1*32(dst) ) + bnez t2, .LTcopy_bytes_checklen + and t0, src, ADDRMASK + andi odd, dst, 0x1 /* odd buffer? */ + PREF( 0, 2*32(src) ) + PREFE( 1, 2*32(dst) ) + bnez t1, .LTdst_unaligned + nop + bnez t0, .LTsrc_unaligned_dst_aligned + /* + * use delay slot for fall-through + * src and dst are aligned; need to compute rem + */ +.LTboth_aligned: + SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter + beqz t0, .LTcleanup_both_aligned # len < 8*NBYTES + nop + SUB len, 8*NBYTES # subtract here for bgez loop + PREF( 0, 3*32(src) ) + PREFE( 1, 3*32(dst) ) + .align 4 +1: + LOAD t0, UNIT(0)(src) + LOAD t1, UNIT(1)(src) + LOAD t2, UNIT(2)(src) + LOAD t3, UNIT(3)(src) + LOAD t4, UNIT(4)(src) + LOAD t5, UNIT(5)(src) + LOAD t6, UNIT(6)(src) + LOAD t7, UNIT(7)(src) + SUB len, len, 8*NBYTES + ADD src, src, 8*NBYTES +EXC( STORE t0, UNIT(0)(dst), .LTs_exc) + ADDC(sum, t0) +EXC( STORE t1, UNIT(1)(dst), .LTs_exc) + ADDC(sum, t1) +EXC( STORE t2, UNIT(2)(dst), .LTs_exc) + ADDC(sum, t2) +EXC( STORE t3, UNIT(3)(dst), .LTs_exc) + ADDC(sum, t3) +EXC( STORE t4, UNIT(4)(dst), .LTs_exc) + ADDC(sum, t4) +EXC( STORE t5, UNIT(5)(dst), .LTs_exc) + ADDC(sum, t5) +EXC( STORE t6, UNIT(6)(dst), .LTs_exc) + ADDC(sum, t6) +EXC( STORE t7, UNIT(7)(dst), .LTs_exc) + ADDC(sum, t7) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 8*NBYTES + PREF( 0, 8*32(src) ) + PREFE( 1, 8*32(dst) ) + bgez len, 1b + .set noreorder + ADD len, 8*NBYTES # revert len (see above) + + /* + * len == the number of bytes left to copy < 8*NBYTES + */ +.LTcleanup_both_aligned: + beqz len, .LTdone + sltu t0, len, 4*NBYTES + bnez t0, .LTless_than_4units + and rem, len, (NBYTES-1) # rem = len % NBYTES + /* + * len >= 4*NBYTES + */ + LOAD t0, UNIT(0)(src) + LOAD t1, UNIT(1)(src) + LOAD t2, UNIT(2)(src) + LOAD t3, UNIT(3)(src) + SUB len, len, 4*NBYTES + ADD src, src, 4*NBYTES +EXC( STORE t0, UNIT(0)(dst), .LTs_exc) + ADDC(sum, t0) +EXC( STORE t1, UNIT(1)(dst), .LTs_exc) + ADDC(sum, t1) +EXC( STORE t2, UNIT(2)(dst), .LTs_exc) + ADDC(sum, t2) +EXC( STORE t3, 
UNIT(3)(dst), .LTs_exc) + ADDC(sum, t3) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES + beqz len, .LTdone + .set noreorder +.LTless_than_4units: + /* + * rem = len % NBYTES + */ + beq rem, len, .LTcopy_bytes + nop +1: + LOAD t0, 0(src) + ADD src, src, NBYTES + SUB len, len, NBYTES +EXC( STORE t0, 0(dst), .LTs_exc) + ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES + bne rem, len, 1b + .set noreorder + + /* + * src and dst are aligned, need to copy rem bytes (rem < NBYTES) + * A loop would do only a byte at a time with possible branch + * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE + * because can't assume read-access to dst. Instead, use + * STREST dst, which doesn't require read access to dst. + * + * This code should perform better than a simple loop on modern, + * wide-issue mips processors because the code has fewer branches and + * more instruction-level parallelism. + */ + beqz len, .LTdone + ADD t1, dst, len # t1 is just past last byte of dst + li bits, 8*NBYTES + SLL rem, len, 3 # rem = number of bits to keep + LOAD t0, 0(src) + SUB bits, bits, rem # bits = number of bits to discard + SHIFT_DISCARD t0, t0, bits +EXC( STREST t0, -1(t1), .LTs_exc) + SHIFT_DISCARD_REVERT t0, t0, bits + .set reorder + ADDC(sum, t0) + b .LTdone + .set noreorder +.LTdst_unaligned: + /* + * dst is unaligned + * t0 = src & ADDRMASK + * t1 = dst & ADDRMASK; T1 > 0 + * len >= NBYTES + * + * Copy enough bytes to align dst + * Set match = (src and dst have same alignment) + */ + LDFIRST t3, FIRST(0)(src) + ADD t2, zero, NBYTES + LDREST t3, REST(0)(src) + SUB t2, t2, t1 # t2 = number of bytes copied + xor match, t0, t1 +EXC( STFIRST t3, FIRST(0)(dst), .LTs_exc) + SLL t4, t1, 3 # t4 = number of bits to discard + SHIFT_DISCARD t3, t3, t4 + /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ + ADDC(sum, t3) + beq len, t2, .LTdone + SUB len, len, t2 + ADD dst, dst, t2 + beqz match, .LTboth_aligned + ADD src, src, t2 + +.LTsrc_unaligned_dst_aligned: + SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter + PREF( 0, 3*32(src) ) + beqz t0, .LTcleanup_src_unaligned + and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES + PREFE( 1, 3*32(dst) ) +1: +/* + * Avoid consecutive LD*'s to the same register since some mips + * implementations can't issue them in the same cycle. + * It's OK to load FIRST(N+1) before REST(N) because the two addresses + * are to the same unit (unless src is aligned, but it's not). 
+ */ + LDFIRST t0, FIRST(0)(src) + LDFIRST t1, FIRST(1)(src) + SUB len, len, 4*NBYTES + LDREST t0, REST(0)(src) + LDREST t1, REST(1)(src) + LDFIRST t2, FIRST(2)(src) + LDFIRST t3, FIRST(3)(src) + LDREST t2, REST(2)(src) + LDREST t3, REST(3)(src) + PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) + ADD src, src, 4*NBYTES +#ifdef CONFIG_CPU_SB1 + nop # improves slotting +#endif +EXC( STORE t0, UNIT(0)(dst), .LTs_exc) + ADDC(sum, t0) +EXC( STORE t1, UNIT(1)(dst), .LTs_exc) + ADDC(sum, t1) +EXC( STORE t2, UNIT(2)(dst), .LTs_exc) + ADDC(sum, t2) +EXC( STORE t3, UNIT(3)(dst), .LTs_exc) + ADDC(sum, t3) + PREFE( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES + bne len, rem, 1b + .set noreorder + +.LTcleanup_src_unaligned: + beqz len, .LTdone + and rem, len, NBYTES-1 # rem = len % NBYTES + beq rem, len, .LTcopy_bytes + nop +1: + LDFIRST t0, FIRST(0)(src) + LDREST t0, REST(0)(src) + ADD src, src, NBYTES + SUB len, len, NBYTES +EXC( STORE t0, 0(dst), .LTs_exc) + ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES + bne len, rem, 1b + .set noreorder + +.LTcopy_bytes_checklen: + beqz len, .LTdone + nop +.LTcopy_bytes: + /* 0 < len < NBYTES */ + move t2, zero # partial word + li t3, SHIFT_START # shift +/* use .Ll_exc_copy here to return correct sum on fault */ +#define COPY_BYTE(N) \ + lbu t0, N(src); \ + SUB len, len, 1; \ +EXC( sbe t0, N(dst), .LTs_exc); \ + SLLV t0, t0, t3; \ + addu t3, SHIFT_INC; \ + beqz len, .LTcopy_bytes_done; \ + or t2, t0 + + COPY_BYTE(0) + COPY_BYTE(1) +#ifdef USE_DOUBLE + COPY_BYTE(2) + COPY_BYTE(3) + COPY_BYTE(4) + COPY_BYTE(5) +#endif + lbu t0, NBYTES-2(src) + SUB len, len, 1 +EXC( sbe t0, NBYTES-2(dst), .LTs_exc) + SLLV t0, t0, t3 + or t2, t0 +.LTcopy_bytes_done: + ADDC(sum, t2) +.LTdone: + /* fold checksum */ +#ifdef USE_DOUBLE + dsll32 v1, sum, 0 + daddu sum, v1 + sltu v1, sum, v1 + dsra32 sum, sum, 0 + addu sum, v1 +#endif + +#ifdef CPU_MIPSR2 + wsbh v1, sum + movn sum, v1, odd +#else + beqz odd, 1f /* odd buffer alignment? */ + lui v1, 0x00ff + addu v1, 0x00ff + and t0, sum, v1 + sll t0, t0, 8 + srl sum, sum, 8 + and sum, sum, v1 + or sum, sum, t0 +1: +#endif + .set reorder + ADDC32(sum, psum) + jr ra + .set noreorder + +.LTs_exc: + li v0, -1 /* invalid checksum */ + li v1, -EFAULT + jr ra + sw v1, (errptr) + END(__csum_partial_copy_touser) + +#endif /* CONFIG_EVA */