--- zzzz-none-000/linux-2.6.19.2/mm/mprotect.c	2007-01-10 19:10:37.000000000 +0000
+++ davinci-8020-5504/linux-2.6.19.2/mm/mprotect.c	2007-01-19 14:42:56.000000000 +0000
@@ -21,10 +21,17 @@
 #include <linux/syscalls.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/grsecurity.h>
+
+#ifdef CONFIG_PAX_MPROTECT
+#include <linux/elf.h>
+#endif
+
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
@@ -128,6 +135,94 @@
 	flush_tlb_range(vma, start, end);
 }
 
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+/* called while holding the mmap semaphore for writing */
+static inline void establish_user_cs_limit(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	struct vm_area_struct *vma = find_vma(mm, start);
+
+	for (; vma && vma->vm_start < end; vma = vma->vm_next)
+		change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
+
+}
+
+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
+{
+	unsigned long oldlimit, newlimit = 0UL;
+
+	if (!(mm->pax_flags & MF_PAX_PAGEEXEC))
+		return;
+
+	spin_lock(&mm->page_table_lock);
+	oldlimit = mm->context.user_cs_limit;
+	if ((prot & VM_EXEC) && oldlimit < end)
+		/* USER_CS limit moved up */
+		newlimit = end;
+	else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
+		/* USER_CS limit moved down */
+		newlimit = start;
+
+	if (newlimit) {
+		mm->context.user_cs_limit = newlimit;
+
+#ifdef CONFIG_SMP
+		wmb();
+		cpus_clear(mm->context.cpu_user_cs_mask);
+		cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
+#endif
+
+		set_user_cs(mm, smp_processor_id());
+	}
+	spin_unlock(&mm->page_table_lock);
+	if (newlimit == end)
+		establish_user_cs_limit(mm, oldlimit, end);
+}
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+static int __mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+	unsigned long start, unsigned long end, unsigned int newflags);
+
+static int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+	unsigned long start, unsigned long end, unsigned int newflags)
+{
+	if (vma->vm_flags & VM_MIRROR) {
+		struct vm_area_struct * vma_m, * prev_m;
+		unsigned long start_m, end_m;
+		int error;
+
+		start_m = vma->vm_start + vma->vm_mirror;
+		vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
+		if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
+			start_m = start + vma->vm_mirror;
+			end_m = end + vma->vm_mirror;
+
+			if (vma_m->vm_start >= SEGMEXEC_TASK_SIZE && !(newflags & VM_EXEC))
+				error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, vma_m->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
+			else
+				error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, newflags);
+			if (error)
+				return error;
+		} else {
+			printk("PAX: VMMIRROR: mprotect bug in %s, %08lx\n", current->comm, vma->vm_start);
+			return -ENOMEM;
+		}
+	}
+
+	return __mprotect_fixup(vma, pprev, start, end, newflags);
+}
+
+static int __mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+	unsigned long start, unsigned long end, unsigned int newflags)
+{
+	struct mm_struct * mm = vma->vm_mm;
+	unsigned long oldflags = vma->vm_flags;
+	long nrpages = (end - start) >> PAGE_SHIFT;
+	unsigned long charged = 0;
+	pgoff_t pgoff;
+	int error;
+	int dirty_accountable = 0;
+#else
 static int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
@@ -144,6 +239,7 @@
 		*pprev = vma;
 		return 0;
 	}
+#endif
 
 	/*
 	 * If we make a private mapping writable we increase our commit;
@@ -193,12 +289,29 @@
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = protection_map[newflags &
-		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 	if (vma_wants_writenotify(vma)) {
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+		if (!(mm->pax_flags & MF_PAX_PAGEEXEC) && (newflags & (VM_READ|VM_WRITE)))
+			vma->vm_page_prot = protection_map[(newflags | VM_EXEC) &
+				(VM_READ|VM_WRITE|VM_EXEC)];
+		else
+#endif
+
 		vma->vm_page_prot = protection_map[newflags &
 			(VM_READ|VM_WRITE|VM_EXEC)];
 		dirty_accountable = 1;
+	} else {
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+		if (!(mm->pax_flags & MF_PAX_PAGEEXEC) && (newflags & (VM_READ|VM_WRITE)))
+			vma->vm_page_prot = protection_map[(newflags | VM_EXEC) &
+				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+		else
+#endif
+
+		vma->vm_page_prot = protection_map[newflags &
+			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 	}
 
 	if (is_vm_hugetlb_page(vma))
@@ -214,6 +327,69 @@
 	return error;
 }
 
+#ifdef CONFIG_PAX_MPROTECT
+/* PaX: non-PIC ELF libraries need relocations on their executable segments
+ * therefore we'll grant them VM_MAYWRITE once during their life.
+ *
+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
+ * basis because we want to allow the common case and not the special ones.
+ */
+static inline void pax_handle_maywrite(struct vm_area_struct * vma, unsigned long start)
+{
+	struct elfhdr elf_h;
+	struct elf_phdr elf_p, p_dyn;
+	elf_dyn dyn;
+	unsigned long i, j = 65536UL / sizeof(struct elf_phdr);
+
+#ifndef CONFIG_PAX_NOELFRELOCS
+	if ((vma->vm_start != start) ||
+	    !vma->vm_file ||
+	    !(vma->vm_flags & VM_MAYEXEC) ||
+	    (vma->vm_flags & VM_MAYNOTWRITE))
+#endif
+
+		return;
+
+	if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char*)&elf_h, sizeof(elf_h)) ||
+	    memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
+
+#ifdef CONFIG_PAX_ETEXECRELOCS
+	    (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) ||
+#else
+	    elf_h.e_type != ET_DYN ||
+#endif
+
+	    !elf_check_arch(&elf_h) ||
+	    elf_h.e_phentsize != sizeof(struct elf_phdr) ||
+	    elf_h.e_phnum > j)
+		return;
+
+	for (i = 0UL; i < elf_h.e_phnum; i++) {
+		if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char*)&elf_p, sizeof(elf_p)))
+			return;
+		if (elf_p.p_type == PT_DYNAMIC) {
+			p_dyn = elf_p;
+			j = i;
+		}
+	}
+	if (elf_h.e_phnum <= j)
+		return;
+
+	i = 0UL;
+	do {
+		if (sizeof(dyn) != kernel_read(vma->vm_file, p_dyn.p_offset + i*sizeof(dyn), (char*)&dyn, sizeof(dyn)))
+			return;
+		if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
+			vma->vm_flags |= VM_MAYWRITE | VM_MAYNOTWRITE;
+			gr_log_textrel(vma);
+			return;
+		}
+		i++;
+	} while (dyn.d_tag != DT_NULL);
+	return;
+}
+#endif
+
 asmlinkage long
 sys_mprotect(unsigned long start, size_t len, unsigned long prot)
 {
@@ -233,6 +409,17 @@
 	end = start + len;
 	if (end <= start)
 		return -ENOMEM;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
+		if (end > SEGMEXEC_TASK_SIZE)
+			return -EINVAL;
+	} else
+#endif
+
+	if (end > TASK_SIZE)
+		return -EINVAL;
+
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
 		return -EINVAL;
 
@@ -240,7 +427,7 @@
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC:
 	 */
-	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+	if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
 		prot |= PROT_EXEC;
 
 	vm_flags = calc_vm_prot_bits(prot);
@@ -272,6 +459,16 @@
 	if (start > vma->vm_start)
 		prev = vma;
 
+	if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
+		error = -EACCES;
+		goto out;
+	}
+
+#ifdef CONFIG_PAX_MPROTECT
+	if ((vma->vm_mm->pax_flags & MF_PAX_MPROTECT) && (prot & PROT_WRITE))
+		pax_handle_maywrite(vma, start);
+#endif
+
 	for (nstart = start ; ; ) {
 		unsigned long newflags;
 
@@ -285,6 +482,12 @@
 			goto out;
 		}
 
+#ifdef CONFIG_PAX_MPROTECT
+		/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
+		if ((vma->vm_mm->pax_flags & MF_PAX_MPROTECT) && !(prot & PROT_WRITE) && (vma->vm_flags & VM_MAYNOTWRITE))
+			newflags &= ~VM_MAYWRITE;
+#endif
+
 		error = security_file_mprotect(vma, reqprot, prot);
 		if (error)
 			goto out;
@@ -308,6 +511,9 @@
 			goto out;
 		}
 	}
+
+	track_exec_limit(current->mm, start, end, vm_flags);
+
 out:
 	up_write(&current->mm->mmap_sem);
 	return error;
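
Note on the hunk above (illustrative, not part of the patch): track_exec_limit() keeps mm->context.user_cs_limit at the end of the highest executable mapping, so the x86-32 USER_CS segment limit can be clamped to approximate non-executable pages. The standalone program below is a minimal user-space sketch of only that limit-update rule; all of its identifiers (model_track_exec_limit, the local user_cs_limit variable, the sample addresses) are made up for illustration and do not exist in the kernel.

/*
 * Sketch of the USER_CS limit rule from track_exec_limit():
 *  - adding PROT_EXEC past the current limit raises the limit to 'end'
 *  - removing PROT_EXEC across the current limit lowers it to 'start'
 */
#include <stdio.h>

#define VM_EXEC 0x4UL				/* mirrors the kernel's VM_EXEC bit */

static unsigned long user_cs_limit = 0x08048000UL;	/* assumed starting limit */

static void model_track_exec_limit(unsigned long start, unsigned long end,
				   unsigned long prot)
{
	unsigned long newlimit = 0UL;

	if ((prot & VM_EXEC) && user_cs_limit < end)
		newlimit = end;			/* limit moves up */
	else if (!(prot & VM_EXEC) && start < user_cs_limit && user_cs_limit <= end)
		newlimit = start;		/* limit moves down */

	if (newlimit)
		user_cs_limit = newlimit;
}

int main(void)
{
	/* a mapping above the old limit becomes executable: limit grows to 'end' */
	model_track_exec_limit(0x08050000UL, 0x08060000UL, VM_EXEC);
	printf("limit after adding exec:   %#lx\n", user_cs_limit);

	/* exec removed from a range straddling the limit: limit shrinks to 'start' */
	model_track_exec_limit(0x08050000UL, 0x08070000UL, 0UL);
	printf("limit after removing exec: %#lx\n", user_cs_limit);
	return 0;
}

Compiled and run, the sketch reports the limit rising to the end of the newly executable range and then dropping back to the start of the range whose exec permission was removed, which is the same "moved up"/"moved down" branching the patch performs before calling set_user_cs().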