--- zzzz-none-000/linux-2.6.19.2/arch/i386/kernel/sysenter.c	2007-01-10 19:10:37.000000000 +0000
+++ davinci-8020-5505/linux-2.6.19.2/arch/i386/kernel/sysenter.c	2007-01-19 14:42:56.000000000 +0000
@@ -45,7 +45,7 @@
 void enable_sep_cpu(void)
 {
 	int cpu = get_cpu();
-	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	struct tss_struct *tss = init_tss + cpu;
 
 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
 		put_cpu();
@@ -125,16 +125,36 @@
 	unsigned long addr;
 	int ret;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m = NULL;
+#endif
+
+	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	if (!vma)
+		return -ENOMEM;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC) {
+		vma_m = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+		if (!vma_m) {
+			kmem_cache_free(vm_area_cachep, vma);
+			return -ENOMEM;
+		}
+	}
+#endif
+
 	down_write(&mm->mmap_sem);
-	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;
-		goto up_fail;
-	}
-
-	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
-	if (!vma) {
-		ret = -ENOMEM;
+		kmem_cache_free(vm_area_cachep, vma);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (vma_m)
+			kmem_cache_free(vm_area_cachep, vma_m);
+#endif
+
 		goto up_fail;
 	}
 
@@ -142,18 +162,49 @@
 	vma->vm_end = addr + PAGE_SIZE;
 	/* MAYWRITE to allow gdb to COW and set breakpoints */
 	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+
+#ifdef CONFIG_PAX_MPROTECT
+	if (mm->pax_flags & MF_PAX_MPROTECT)
+		vma->vm_flags &= ~VM_MAYWRITE;
+#endif
+
 	vma->vm_flags |= mm->def_flags;
-	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_page_prot = protection_map[vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
 	vma->vm_ops = &syscall_vm_ops;
 	vma->vm_mm = mm;
 
 	ret = insert_vm_struct(mm, vma);
 	if (unlikely(ret)) {
 		kmem_cache_free(vm_area_cachep, vma);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (vma_m)
+			kmem_cache_free(vm_area_cachep, vma_m);
+#endif
+
 		goto up_fail;
 	}
 
-	current->mm->context.vdso = (void *)addr;
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma_m) {
+		*vma_m = *vma;
+		vma_m->vm_start += SEGMEXEC_TASK_SIZE;
+		vma_m->vm_end += SEGMEXEC_TASK_SIZE;
+		ret = insert_vm_struct(mm, vma_m);
+		if (unlikely(ret)) {
+			kmem_cache_free(vm_area_cachep, vma_m);
+			goto up_fail;
+		}
+		vma_m->vm_flags |= VM_MIRROR;
+		vma->vm_flags |= VM_MIRROR;
+		vma_m->vm_mirror = vma->vm_start - vma_m->vm_start;
+		vma->vm_mirror = vma_m->vm_start - vma->vm_start;
+		vma_m->vm_pgoff = vma->vm_pgoff;
+		mm->total_vm++;
+	}
+#endif
+
+	current->mm->context.vdso = addr;
 	current_thread_info()->sysenter_return =
 				    (void *)VDSO_SYM(&SYSENTER_RETURN);
 	mm->total_vm++;
@@ -164,8 +215,17 @@
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+	if (vma->vm_start == vma->vm_mm->context.vdso)
 		return "[vdso]";
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_MIRROR))
+		return NULL;
+
+	if (vma->vm_start + vma->vm_mirror == vma->vm_mm->context.vdso)
+		return "[vdso]";
+#endif
+
 	return NULL;
 }
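
For reference, a minimal user-space sketch of the vm_mirror bookkeeping that the SEGMEXEC hunks above set up: each half of the mirrored vDSO pair records the distance to its counterpart's start, so vm_start + vm_mirror lands on the other half, which is exactly the test added to arch_vma_name. The vma_stub type and the SEGMEXEC_TASK_SIZE value below are illustrative stand-ins, not the kernel's definitions; the arithmetic relies on ordinary unsigned wraparound.

/* Illustrative stand-in for the two vm_area_struct fields used by the
 * VM_MIRROR bookkeeping in the patch; not the kernel's definitions. */
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed i386 PaX split point, for illustration only */

struct vma_stub {
	unsigned long vm_start;
	unsigned long vm_mirror;	/* distance to the counterpart VMA's start */
};

/* Mirrors the assignments in the patch:
 *	vma->vm_mirror   = vma_m->vm_start - vma->vm_start;
 *	vma_m->vm_mirror = vma->vm_start   - vma_m->vm_start;   (wraps "negative")
 * so vm_start + vm_mirror yields the start of the other half of the pair. */
static unsigned long mirror_start(const struct vma_stub *v)
{
	return v->vm_start + v->vm_mirror;
}

int main(void)
{
	struct vma_stub vdso   = { .vm_start = 0x00110000UL };	/* hypothetical vDSO address */
	struct vma_stub vdso_m = { .vm_start = vdso.vm_start + SEGMEXEC_TASK_SIZE };

	vdso.vm_mirror   = vdso_m.vm_start - vdso.vm_start;
	vdso_m.vm_mirror = vdso.vm_start - vdso_m.vm_start;

	printf("low  half %#010lx -> mirror %#010lx\n", vdso.vm_start, mirror_start(&vdso));
	printf("high half %#010lx -> mirror %#010lx\n", vdso_m.vm_start, mirror_start(&vdso_m));
	return 0;
}

Storing a relative offset rather than a pointer keeps the relation symmetric: the same addition recovers either partner of the mirrored pair from the other.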