--- zzzz-none-000/linux-2.6.19.2/arch/i386/kernel/entry.S	2007-01-10 19:10:37.000000000 +0000
+++ davinci-8020-5505/linux-2.6.19.2/arch/i386/kernel/entry.S	2007-01-19 14:42:56.000000000 +0000
@@ -82,6 +82,8 @@
 #define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
 #define INTERRUPT_RETURN		iret
 #define GET_CR0_INTO_EAX		movl %cr0, %eax
+#define GET_CR0_INTO_EDX		movl %cr0, %edx
+#define SET_CR0_FROM_EDX		movl %edx, %cr0
 
 #ifdef CONFIG_PREEMPT
 #define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
@@ -105,7 +107,7 @@
 #define resume_userspace_sig	resume_userspace
 #endif
 
-#define SAVE_ALL \
+#define __SAVE_ALL(_DS) \
 	cld; \
 	pushl %es; \
 	CFI_ADJUST_CFA_OFFSET 4;\
@@ -134,10 +136,24 @@
 	pushl %ebx; \
 	CFI_ADJUST_CFA_OFFSET 4;\
 	CFI_REL_OFFSET ebx, 0;\
-	movl $(__USER_DS), %edx; \
+	movl $(_DS), %edx; \
 	movl %edx, %ds; \
 	movl %edx, %es;
 
+#ifdef CONFIG_PAX_KERNEXEC
+#define SAVE_ALL \
+	__SAVE_ALL(__KERNEL_DS) \
+	GET_CR0_INTO_EDX; \
+	movl %edx, %esi; \
+	orl $0x10000, %edx; \
+	xorl %edx, %esi; \
+	SET_CR0_FROM_EDX
+#elif defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+#define SAVE_ALL __SAVE_ALL(__KERNEL_DS)
+#else
+#define SAVE_ALL __SAVE_ALL(__USER_DS)
+#endif
+
 #define RESTORE_INT_REGS \
 	popl %ebx;	\
 	CFI_ADJUST_CFA_OFFSET -4;\
@@ -245,7 +261,18 @@
 	movb CS(%esp), %al
 	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
 	cmpl $USER_RPL, %eax
+
+#ifdef CONFIG_PAX_KERNEXEC
+	jae resume_userspace
+
+	GET_CR0_INTO_EDX
+	xorl %esi, %edx
+	SET_CR0_FROM_EDX
+	jmp resume_kernel
+#else
 	jb resume_kernel		# not returning to v8086 or userspace
+#endif
+
 ENTRY(resume_userspace)
 	DISABLE_INTERRUPTS	# make sure we don't miss an interrupt
 				# setting need_resched or sigpending
@@ -301,10 +328,9 @@
 	/*CFI_REL_OFFSET cs, 0*/
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
-	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
-	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
 	 */
-	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+	GET_THREAD_INFO(%ebp)
+	pushl TI_sysenter_return(%ebp)
 	CFI_ADJUST_CFA_OFFSET 4
 	CFI_REL_OFFSET eip, 0
 
@@ -312,9 +338,20 @@
  * Load the potential sixth argument from user stack.
  * Careful about security.
  */
+	movl 12(%esp),%ebp
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pushl $(__USER_DS)
+	CFI_ADJUST_CFA_OFFSET 4
+	pop %ds
+	CFI_ADJUST_CFA_OFFSET -4
+1:	movl %ds:(%ebp),%ebp
+#else
 	cmpl $__PAGE_OFFSET-3,%ebp
 	jae syscall_fault
 1:	movl (%ebp),%ebp
+#endif
+
 .section __ex_table,"a"
 	.align 4
 	.long 1b,syscall_fault
@@ -337,14 +374,36 @@
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
 	jne syscall_exit_work
+
+#ifdef CONFIG_PAX_RANDKSTACK
+	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	call pax_randomize_kstack
+	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
+#endif
+
 /* if something modifies registers it must also disable sysexit */
 	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
+1:	mov DS(%esp), %ds
+2:	mov ES(%esp), %es
 	ENABLE_INTERRUPTS_SYSEXIT
 	CFI_ENDPROC
 
+.section .fixup,"ax"
+3:	movl $0,DS(%esp)
+	jmp 1b
+4:	movl $0,ES(%esp)
+	jmp 2b
+.previous
+.section __ex_table,"a"
+	.align 4
+	.long 1b,3b
+	.long 2b,4b
+.previous
 
 	# system call handler stub
 ENTRY(system_call)
@@ -375,6 +434,10 @@
 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
 
+#ifdef CONFIG_PAX_RANDKSTACK
+	call pax_randomize_kstack
+#endif
+
 restore_all:
 	movl EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
 	# Warning: OLDSS(%esp) contains the wrong/random values if we
@@ -553,7 +616,7 @@
  * Build the entry stubs and pointer table with
  * some assembler magic.
  */
-.data
+.section .rodata,"a",@progbits
 ENTRY(interrupt)
 .text
 
@@ -568,7 +631,7 @@
 1:	pushl $~(vector)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp common_interrupt
-.data
+.section .rodata,"a",@progbits
 	.long 1b
 .text
 vector=vector+1
@@ -642,12 +705,21 @@
 	popl %ecx
 	CFI_ADJUST_CFA_OFFSET -4
 	/*CFI_REGISTER es, ecx*/
+
+#ifdef CONFIG_PAX_KERNEXEC
+	GET_CR0_INTO_EDX
+	movl %edx, %esi
+	orl $0x10000, %edx
+	xorl %edx, %esi
+	SET_CR0_FROM_EDX
+#endif
+
 	movl ES(%esp), %edi		# get the function address
 	movl ORIG_EAX(%esp), %edx	# get the error code
 	movl %eax, ORIG_EAX(%esp)
 	movl %ecx, ES(%esp)
 	/*CFI_REL_OFFSET es, ES*/
-	movl $(__USER_DS), %ecx
+	movl $(__KERNEL_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
 	movl %esp,%eax			# pt_regs pointer
@@ -778,6 +850,13 @@
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_nmi
+
+#ifdef CONFIG_PAX_KERNEXEC
+	GET_CR0_INTO_EDX
+	xorl %esi, %edx
+	SET_CR0_FROM_EDX
+#endif
+
 	jmp restore_nocheck_notrace
 	CFI_ENDPROC
 
@@ -820,6 +899,13 @@
 	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
 	xorl %edx,%edx			# zero error code
 	call do_nmi
+
+#ifdef CONFIG_PAX_KERNEXEC
+	GET_CR0_INTO_EDX
+	xorl %esi, %edx
+	SET_CR0_FROM_EDX
+#endif
+
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to 16bit stack
 1:	INTERRUPT_RETURN
@@ -957,8 +1043,8 @@
 	movl %edi, EDI(%edx)
 	movl %ebp, EBP(%edx)
 	movl %ebx, EAX(%edx)
-	movl $__USER_DS, DS(%edx)
-	movl $__USER_DS, ES(%edx)
+	movl $__KERNEL_DS, DS(%edx)
+	movl $__KERNEL_DS, ES(%edx)
 	movl %ebx, ORIG_EAX(%edx)
 	movl %ecx, EIP(%edx)
 	movl 12(%esp), %ecx
@@ -987,7 +1073,6 @@
 	CFI_ENDPROC
 ENDPROC(kernel_thread_helper)
 
-.section .rodata,"a"
 #include "syscall_table.S"
 
 syscall_table_size=(.-sys_call_table)
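
Note on the CONFIG_PAX_KERNEXEC hunks above: on kernel entry, SAVE_ALL forces CR0.WP (bit 16, the 0x10000 mask) on and keeps the XOR difference between the old and new CR0 values in %esi; on the exit paths a single "xorl %esi, %edx" applied to the current CR0 restores whatever WP state was in effect before entry (%esi is zero when WP was already set, so the restore is then a no-op). Below is a minimal, self-contained C sketch of that save/toggle/restore-by-XOR arithmetic; it is not part of the patch and uses an ordinary variable standing in for CR0 (no privileged instructions), so the sample values are illustrative only.

/*
 * Illustrative sketch only -- not part of the patch above.  It mirrors the
 * KERNEXEC entry sequence
 *	GET_CR0_INTO_EDX; movl %edx,%esi; orl $0x10000,%edx;
 *	xorl %edx,%esi;   SET_CR0_FROM_EDX
 * and the exit sequence
 *	GET_CR0_INTO_EDX; xorl %esi,%edx; SET_CR0_FROM_EDX
 * on a plain variable in place of the CR0 register.
 */
#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP 0x10000u	/* CR0 bit 16: write-protect in ring 0 */

int main(void)
{
	/* simulated CR0 values: once with WP clear, once with WP already set */
	unsigned int samples[] = { 0x8005003bu & ~X86_CR0_WP,
				   0x8005003bu |  X86_CR0_WP };

	for (int i = 0; i < 2; i++) {
		unsigned int cr0 = samples[i];

		/* entry: force WP on, remember the delta in "esi" */
		unsigned int edx = cr0;		/* GET_CR0_INTO_EDX   */
		unsigned int esi = edx;		/* movl %edx, %esi    */
		edx |= X86_CR0_WP;		/* orl $0x10000, %edx */
		esi ^= edx;			/* xorl %edx, %esi    */
		cr0 = edx;			/* SET_CR0_FROM_EDX   */

		/* exit: XOR the saved delta back in to restore the old value */
		edx = cr0;			/* GET_CR0_INTO_EDX   */
		edx ^= esi;			/* xorl %esi, %edx    */
		cr0 = edx;			/* SET_CR0_FROM_EDX   */

		assert(cr0 == samples[i]);	/* original CR0 restored */
		printf("cr0 %#010x round-trips, saved delta = %#07x\n",
		       samples[i], esi);
	}
	return 0;
}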