--- zzzz-none-000/linux-5.15.111/arch/arm/kernel/phys2virt.S	2023-05-11 14:00:40.000000000 +0000
+++ puma7-arm-6670-761/linux-5.15.111/arch/arm/kernel/phys2virt.S	2024-02-07 09:27:16.000000000 +0000
@@ -5,6 +5,11 @@
  *  All Rights Reserved
  */
 
+/*
+ * Includes Intel/MXL Corporation's changes/modifications dated: 2022.
+ * Changed/modified portions - Copyright (c) 2022, Intel/MXL Corporation.
+ */
+
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
@@ -38,8 +43,13 @@
 	strcc	ip, [r0, #HIGH_OFFSET]	@ save to __pv_offset high bits
 	str	r3, [r0, #LOW_OFFSET]	@ save to __pv_offset low bits
 
+#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
 	mov	r0, r3, lsr #21		@ constant for add/sub instructions
 	teq	r3, r0, lsl #21		@ must be 2 MiB aligned
+#else
+	mov	r0, r3, lsr #16		@ constant for add/sub instructions
+	teq	r3, r0, lsl #16		@ must be 64 KiB aligned
+#endif
 	bne	0f
 
 	adr_l	r4, __pv_table_begin
@@ -55,8 +65,13 @@
 	adr_l	r6, __pv_offset
 	ldr	r0, [r6, #HIGH_OFFSET]	@ pv_offset high word
 	ldr	r6, [r6, #LOW_OFFSET]	@ pv_offset low word
+#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
 	cmn	r0, #1
+#endif
 #ifdef CONFIG_THUMB2_KERNEL
+# ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+# error ARM_PATCH_PHYS_VIRT_16BIT IS NOT TESTED WITH THUMB2 KERNEL
+# endif
 	@
 	@ The Thumb-2 versions of the patchable sequences are
 	@
@@ -179,10 +194,16 @@
 	@ MOVW | cond | 0 0 1 1 0 0 0 0 | imm4 | Rd | imm12 |
 	@ +------+-----------------+------+------+-------+
 	@
+# ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
 	moveq	r0, #0x400000		@ set bit 22, mov to mvn instruction
+# else
+# ifdef CONFIG_CPU_ENDIAN_BE8
+# error ARM_PATCH_PHYS_VIRT_16BIT IS NOT TESTED WITH ENABLED CONFIG_CPU_ENDIAN_BE8
+# endif
 	mov	r3, r6, lsr #16		@ put offset bits 31-16 into r3
+	and	r3, r3, #255		@ only keep offset bits 23-16 in r3
 	mov	r6, r6, lsr #24		@ put offset bits 31-24 into r6
-	and	r3, r3, #0xf0		@ only keep offset bits 23-20 in r3
+# endif
 	b	.Lnext
 .Lloop:	ldr	ip, [r7, r4]
 #ifdef CONFIG_ARM_LPAE
@@ -199,10 +220,16 @@
 	b	2f
 1:
 #endif
-	tst	ip, #PV_IMMR_MSB		@ rotation value >= 16 ?
 	bic	ip, ip, #PV_IMM8_MASK
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	tst	ip, #0x400			@ rotate shift tells us LS or MS byte
+	orrne	ip, ip, r6			@ mask in offset bits 31-24
+	orreq	ip, ip, r3			@ mask in offset bits 23-16
+#else
+	tst	ip, #PV_IMMR_MSB		@ rotation value >= 16 ?
 	orreq	ip, ip, r6 ARM_BE8(, lsl #24)	@ mask in offset bits 31-24
 	orrne	ip, ip, r3 ARM_BE8(, lsl #24)	@ mask in offset bits 23-20
+#endif
 2:	str	ip, [r7, r4]
 	add	r4, r4, #4