--- zzzz-none-000/linux-3.10.107/arch/powerpc/lib/memcpy_power7.S	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/powerpc/lib/memcpy_power7.S	2021-02-04 17:41:59.000000000 +0000
@@ -20,18 +20,27 @@
 #include <asm/ppc_asm.h>
 
 _GLOBAL(memcpy_power7)
+
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB)		lvsl	VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC)	vperm	VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB)		lvsr	VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC)	vperm	VRT,VRB,VRA,VRC
+#endif
+
 #ifdef CONFIG_ALTIVEC
 	cmpldi	r5,16
 	cmpldi	cr1,r5,4096
-	std	r3,48(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 
 	blt	.Lshort_copy
 	bgt	cr1,.Lvmx_copy
 #else
 	cmpldi	r5,16
-	std	r3,48(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 
 	blt	.Lshort_copy
 #endif
@@ -207,7 +216,7 @@
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
 
-15:	ld	r3,48(r1)
+15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blr
 
 .Lunwind_stack_nonvmx_copy:
@@ -217,16 +226,16 @@
 #ifdef CONFIG_ALTIVEC
 .Lvmx_copy:
 	mflr	r0
-	std	r4,56(r1)
-	std	r5,64(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_copy
+	bl	enter_vmx_copy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+48(r1)
-	ld	r4,STACKFRAMESIZE+56(r1)
-	ld	r5,STACKFRAMESIZE+64(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
+	ld	r5,STK_REG(R29)(r1)
 	mtlr	r0
 
 	/*
@@ -312,29 +321,29 @@
 	li	r11,48
 
 	bf	cr7*4+3,5f
-	lvx	vr1,r0,r4
+	lvx	v1,r0,r4
 	addi	r4,r4,16
-	stvx	vr1,r0,r3
+	stvx	v1,r0,r3
 	addi	r3,r3,16
 
 5:	bf	cr7*4+2,6f
-	lvx	vr1,r0,r4
-	lvx	vr0,r4,r9
+	lvx	v1,r0,r4
+	lvx	v0,r4,r9
 	addi	r4,r4,32
-	stvx	vr1,r0,r3
-	stvx	vr0,r3,r9
+	stvx	v1,r0,r3
+	stvx	v0,r3,r9
 	addi	r3,r3,32
 
 6:	bf	cr7*4+1,7f
-	lvx	vr3,r0,r4
-	lvx	vr2,r4,r9
-	lvx	vr1,r4,r10
-	lvx	vr0,r4,r11
+	lvx	v3,r0,r4
+	lvx	v2,r4,r9
+	lvx	v1,r4,r10
+	lvx	v0,r4,r11
 	addi	r4,r4,64
-	stvx	vr3,r0,r3
-	stvx	vr2,r3,r9
-	stvx	vr1,r3,r10
-	stvx	vr0,r3,r11
+	stvx	v3,r0,r3
+	stvx	v2,r3,r9
+	stvx	v1,r3,r10
+	stvx	v0,r3,r11
 	addi	r3,r3,64
 
 7:	sub	r5,r5,r6
@@ -357,23 +366,23 @@
 	 */
 	.align	5
 8:
-	lvx	vr7,r0,r4
-	lvx	vr6,r4,r9
-	lvx	vr5,r4,r10
-	lvx	vr4,r4,r11
-	lvx	vr3,r4,r12
-	lvx	vr2,r4,r14
-	lvx	vr1,r4,r15
-	lvx	vr0,r4,r16
+	lvx	v7,r0,r4
+	lvx	v6,r4,r9
+	lvx	v5,r4,r10
+	lvx	v4,r4,r11
+	lvx	v3,r4,r12
+	lvx	v2,r4,r14
+	lvx	v1,r4,r15
+	lvx	v0,r4,r16
 	addi	r4,r4,128
-	stvx	vr7,r0,r3
-	stvx	vr6,r3,r9
-	stvx	vr5,r3,r10
-	stvx	vr4,r3,r11
-	stvx	vr3,r3,r12
-	stvx	vr2,r3,r14
-	stvx	vr1,r3,r15
-	stvx	vr0,r3,r16
+	stvx	v7,r0,r3
+	stvx	v6,r3,r9
+	stvx	v5,r3,r10
+	stvx	v4,r3,r11
+	stvx	v3,r3,r12
+	stvx	v2,r3,r14
+	stvx	v1,r3,r15
+	stvx	v0,r3,r16
 	addi	r3,r3,128
 	bdnz	8b
 
@@ -387,29 +396,29 @@
 	mtocrf	0x01,r6
 
 	bf	cr7*4+1,9f
-	lvx	vr3,r0,r4
-	lvx	vr2,r4,r9
-	lvx	vr1,r4,r10
-	lvx	vr0,r4,r11
+	lvx	v3,r0,r4
+	lvx	v2,r4,r9
+	lvx	v1,r4,r10
+	lvx	v0,r4,r11
 	addi	r4,r4,64
-	stvx	vr3,r0,r3
-	stvx	vr2,r3,r9
-	stvx	vr1,r3,r10
-	stvx	vr0,r3,r11
+	stvx	v3,r0,r3
+	stvx	v2,r3,r9
+	stvx	v1,r3,r10
+	stvx	v0,r3,r11
 	addi	r3,r3,64
 
 9:	bf	cr7*4+2,10f
-	lvx	vr1,r0,r4
-	lvx	vr0,r4,r9
+	lvx	v1,r0,r4
+	lvx	v0,r4,r9
 	addi	r4,r4,32
-	stvx	vr1,r0,r3
-	stvx	vr0,r3,r9
+	stvx	v1,r0,r3
+	stvx	v0,r3,r9
 	addi	r3,r3,32
 
 10:	bf	cr7*4+3,11f
-	lvx	vr1,r0,r4
+	lvx	v1,r0,r4
 	addi	r4,r4,16
-	stvx	vr1,r0,r3
+	stvx	v1,r0,r3
 	addi	r3,r3,16
 
 	/* Up to 15B to go */
@@ -438,8 +447,8 @@
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	b	exit_vmx_copy		/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -485,42 +494,42 @@
 	li	r10,32
 	li	r11,48
 
-	lvsl	vr16,0,r4	/* Setup permute control vector */
-	lvx	vr0,0,r4
+	LVS(v16,0,r4)		/* Setup permute control vector */
+	lvx	v0,0,r4
 	addi	r4,r4,16
 
 	bf	cr7*4+3,5f
-	lvx	vr1,r0,r4
-	vperm	vr8,vr0,vr1,vr16
+	lvx	v1,r0,r4
+	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-	stvx	vr8,r0,r3
+	stvx	v8,r0,r3
 	addi	r3,r3,16
-	vor	vr0,vr1,vr1
+	vor	v0,v1,v1
 
 5:	bf	cr7*4+2,6f
-	lvx	vr1,r0,r4
-	vperm	vr8,vr0,vr1,vr16
-	lvx	vr0,r4,r9
-	vperm	vr9,vr1,vr0,vr16
+	lvx	v1,r0,r4
+	VPERM(v8,v0,v1,v16)
+	lvx	v0,r4,r9
+	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
+	stvx	v8,r0,r3
+	stvx	v9,r3,r9
 	addi	r3,r3,32
 
 6:	bf	cr7*4+1,7f
-	lvx	vr3,r0,r4
-	vperm	vr8,vr0,vr3,vr16
-	lvx	vr2,r4,r9
-	vperm	vr9,vr3,vr2,vr16
-	lvx	vr1,r4,r10
-	vperm	vr10,vr2,vr1,vr16
-	lvx	vr0,r4,r11
-	vperm	vr11,vr1,vr0,vr16
+	lvx	v3,r0,r4
+	VPERM(v8,v0,v3,v16)
+	lvx	v2,r4,r9
+	VPERM(v9,v3,v2,v16)
+	lvx	v1,r4,r10
+	VPERM(v10,v2,v1,v16)
+	lvx	v0,r4,r11
+	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
-	stvx	vr10,r3,r10
-	stvx	vr11,r3,r11
+	stvx	v8,r0,r3
+	stvx	v9,r3,r9
+	stvx	v10,r3,r10
+	stvx	v11,r3,r11
 	addi	r3,r3,64
 
 7:	sub	r5,r5,r6
@@ -543,31 +552,31 @@
 	 */
 	.align	5
 8:
-	lvx	vr7,r0,r4
-	vperm	vr8,vr0,vr7,vr16
-	lvx	vr6,r4,r9
-	vperm	vr9,vr7,vr6,vr16
-	lvx	vr5,r4,r10
-	vperm	vr10,vr6,vr5,vr16
-	lvx	vr4,r4,r11
-	vperm	vr11,vr5,vr4,vr16
-	lvx	vr3,r4,r12
-	vperm	vr12,vr4,vr3,vr16
-	lvx	vr2,r4,r14
-	vperm	vr13,vr3,vr2,vr16
-	lvx	vr1,r4,r15
-	vperm	vr14,vr2,vr1,vr16
-	lvx	vr0,r4,r16
-	vperm	vr15,vr1,vr0,vr16
+	lvx	v7,r0,r4
+	VPERM(v8,v0,v7,v16)
+	lvx	v6,r4,r9
+	VPERM(v9,v7,v6,v16)
+	lvx	v5,r4,r10
+	VPERM(v10,v6,v5,v16)
+	lvx	v4,r4,r11
+	VPERM(v11,v5,v4,v16)
+	lvx	v3,r4,r12
+	VPERM(v12,v4,v3,v16)
+	lvx	v2,r4,r14
+	VPERM(v13,v3,v2,v16)
+	lvx	v1,r4,r15
+	VPERM(v14,v2,v1,v16)
+	lvx	v0,r4,r16
+	VPERM(v15,v1,v0,v16)
 	addi	r4,r4,128
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
-	stvx	vr10,r3,r10
-	stvx	vr11,r3,r11
-	stvx	vr12,r3,r12
-	stvx	vr13,r3,r14
-	stvx	vr14,r3,r15
-	stvx	vr15,r3,r16
+	stvx	v8,r0,r3
+	stvx	v9,r3,r9
+	stvx	v10,r3,r10
+	stvx	v11,r3,r11
+	stvx	v12,r3,r12
+	stvx	v13,r3,r14
+	stvx	v14,r3,r15
+	stvx	v15,r3,r16
 	addi	r3,r3,128
 	bdnz	8b
 
@@ -581,36 +590,36 @@
 	mtocrf	0x01,r6
 
 	bf	cr7*4+1,9f
-	lvx	vr3,r0,r4
-	vperm	vr8,vr0,vr3,vr16
-	lvx	vr2,r4,r9
-	vperm	vr9,vr3,vr2,vr16
-	lvx	vr1,r4,r10
-	vperm	vr10,vr2,vr1,vr16
-	lvx	vr0,r4,r11
-	vperm	vr11,vr1,vr0,vr16
+	lvx	v3,r0,r4
+	VPERM(v8,v0,v3,v16)
+	lvx	v2,r4,r9
+	VPERM(v9,v3,v2,v16)
+	lvx	v1,r4,r10
+	VPERM(v10,v2,v1,v16)
+	lvx	v0,r4,r11
+	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
-	stvx	vr10,r3,r10
-	stvx	vr11,r3,r11
+	stvx	v8,r0,r3
+	stvx	v9,r3,r9
+	stvx	v10,r3,r10
+	stvx	v11,r3,r11
 	addi	r3,r3,64
 
 9:	bf	cr7*4+2,10f
-	lvx	vr1,r0,r4
-	vperm	vr8,vr0,vr1,vr16
-	lvx	vr0,r4,r9
-	vperm	vr9,vr1,vr0,vr16
+	lvx	v1,r0,r4
+	VPERM(v8,v0,v1,v16)
+	lvx	v0,r4,r9
+	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
+	stvx	v8,r0,r3
+	stvx	v9,r3,r9
 	addi	r3,r3,32
 
 10:	bf	cr7*4+3,11f
-	lvx	vr1,r0,r4
-	vperm	vr8,vr0,vr1,vr16
+	lvx	v1,r0,r4
+	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-	stvx	vr8,r0,r3
+	stvx	v8,r0,r3
 	addi	r3,r3,16
 
 	/* Up to 15B to go */
@@ -642,6 +651,6 @@
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
-#endif /* CONFiG_ALTIVEC */
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	b	exit_vmx_copy		/* tail call optimise */
+#endif /* CONFIG_ALTIVEC */