--- zzzz-none-000/linux-2.6.19.2/mm/memory.c	2007-01-10 19:10:37.000000000 +0000
+++ davinci-8020-5504/linux-2.6.19.2/mm/memory.c	2007-01-19 15:11:30.000000000 +0000
@@ -50,6 +50,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -322,6 +323,11 @@
 
 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 {
+
+#ifdef CONFIG_PAX_KERNEXEC
+	unsigned long cr0;
+#endif
+
 	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
 	if (!new)
 		return -ENOMEM;
@@ -329,8 +335,19 @@
 	spin_lock(&init_mm.page_table_lock);
 	if (pmd_present(*pmd))		/* Another has populated it */
 		pte_free_kernel(new);
-	else
+	else {
+
+#ifdef CONFIG_PAX_KERNEXEC
+		pax_open_kernel(cr0);
+#endif
+
 		pmd_populate_kernel(&init_mm, pmd, new);
+
+#ifdef CONFIG_PAX_KERNEXEC
+		pax_close_kernel(cr0);
+#endif
+
+	}
 	spin_unlock(&init_mm.page_table_lock);
 	return 0;
 }
@@ -1469,6 +1486,88 @@
 	copy_user_highpage(dst, src, va);
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+/* PaX: if vma is mirrored, synchronize the mirror's PTE
+ *
+ * the ptl of the lower mapped page is held on entry and is not released on exit
+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
+ */
+static void pax_mirror_fault(struct vm_area_struct *vma, unsigned long address, pte_t *pte)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address_m, pfn_m;
+	struct vm_area_struct * vma_m = NULL;
+	pte_t * pte_m, entry_m;
+	struct page * page_m = NULL;
+
+	address_m = vma->vm_start + vma->vm_mirror;
+	vma_m = find_vma(mm, address_m);
+	BUG_ON(!vma_m || vma_m->vm_start != address_m);
+
+	address_m = address + vma->vm_mirror;
+	pte_m = pte_offset_map_nested(pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m), address_m);
+
+	if (pte_same(*pte, *pte_m)) {
+		pte_unmap_nested(pte_m);
+		return;
+	}
+
+	pfn_m = pte_pfn(*pte);
+	if (pte_present(*pte_m)) {
+		page_m = vm_normal_page(vma_m, address_m, *pte_m);
+		if (page_m) {
+			flush_cache_page(vma_m, address_m, pfn_m);
+			flush_icache_page(vma_m, page_m);
+		}
+	}
+
+	if (pte_present(*pte_m))
+		entry_m = ptep_clear_flush(vma_m, address_m, pte_m);
+	else
+		entry_m = ptep_get_and_clear(mm, address_m, pte_m);
+
+	if (pte_none(entry_m)) {
+	} else if (pte_present(entry_m)) {
+		if (page_m) {
+			page_remove_rmap(page_m);
+			if (PageAnon(page_m))
+				dec_mm_counter(mm, anon_rss);
+			else
+				dec_mm_counter(mm, file_rss);
+			page_cache_release(page_m);
+		}
+	} else if (!pte_file(entry_m)) {
+		free_swap_and_cache(pte_to_swp_entry(entry_m));
+	} else {
+		printk(KERN_ERR "PAX: VMMIRROR: bug in mirror_fault: %08lx, %08lx, %08lx, %08lx\n",
+		       address, vma->vm_start, address_m, vma_m->vm_start);
+	}
+
+	page_m = vm_normal_page(vma, address, *pte);
+	entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
+	if (pte_write(*pte))
+		entry_m = maybe_mkwrite(pte_mkdirty(entry_m), vma_m);
+	if (page_m) {
+		page_cache_get(page_m);
+		/*
+		 * we can test PAGE_MAPPING_ANON without holding page_map_lock because
+		 * we hold the page table lock and have a reference to page_m
+		 */
+		if (PageAnon(page_m)) {
+			page_add_anon_rmap(page_m, vma_m, address_m);
+			inc_mm_counter(mm, anon_rss);
+		} else {
+			page_add_file_rmap(page_m);
+			inc_mm_counter(mm, file_rss);
+		}
+	}
+	set_pte_at(mm, address_m, pte_m, entry_m);
+	update_mmu_cache(vma_m, address_m, entry_m);
+	lazy_mmu_prot_update(entry_m);
+	pte_unmap_nested(pte_m);
+}
+#endif
+
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
@@ -1612,6 +1711,12 @@
 		/* Free the old page.. */
 		new_page = old_page;
 		ret |= VM_FAULT_WRITE;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (vma->vm_flags & VM_MIRROR)
+			pax_mirror_fault(vma, address, page_table);
+#endif
+
 	}
 	if (new_page)
 		page_cache_release(new_page);
@@ -1871,6 +1976,7 @@
 
 do_expand:
 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+	gr_learn_resource(current, RLIMIT_FSIZE, offset, 1);
 	if (limit != RLIM_INFINITY && offset > limit)
 		goto out_sig;
 	if (offset > inode->i_sb->s_maxbytes)
@@ -2065,6 +2171,12 @@
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, pte);
 	lazy_mmu_prot_update(pte);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma->vm_flags & VM_MIRROR)
+		pax_mirror_fault(vma, address, page_table);
+#endif
+
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
@@ -2127,6 +2239,12 @@
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, entry);
 	lazy_mmu_prot_update(entry);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma->vm_flags & VM_MIRROR)
+		pax_mirror_fault(vma, address, page_table);
+#endif
+
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return VM_FAULT_MINOR;
@@ -2272,6 +2390,12 @@
 	/* no need to invalidate: a not-present page shouldn't be cached */
 	update_mmu_cache(vma, address, entry);
 	lazy_mmu_prot_update(entry);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma->vm_flags & VM_MIRROR)
+		pax_mirror_fault(vma, address, page_table);
+#endif
+
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
@@ -2439,6 +2563,12 @@
 		flush_tlb_page(vma, address);
 	}
 unlock:
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma->vm_flags & VM_MIRROR)
+		pax_mirror_fault(vma, address, pte);
+#endif
+
 	pte_unmap_unlock(pte, ptl);
 	return VM_FAULT_MINOR;
 }
@@ -2461,6 +2591,49 @@
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, write_access);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma->vm_flags & VM_MIRROR) {
+		unsigned long address_m;
+		struct vm_area_struct * vma_m;
+		pgd_t *pgd_m;
+		pud_t *pud_m;
+		pmd_t *pmd_m;
+
+		address_m = vma->vm_start + vma->vm_mirror;
+		vma_m = find_vma(mm, address_m);
+
+		/* PaX: sanity checks */
+		if (!vma_m) {
+			printk(KERN_ERR "PAX: VMMIRROR: fault bug, %08lx, %p, %08lx, %p\n",
+			       address, vma, address_m, vma_m);
+			return VM_FAULT_SIGBUS;
+		} else if (!(vma_m->vm_flags & VM_MIRROR) ||
+			   vma_m->vm_start != address_m ||
+			   vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start)
+		{
+			printk(KERN_ERR "PAX: VMMIRROR: fault bug2, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+			       address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
+			return VM_FAULT_SIGBUS;
+		}
+
+		if (address_m < address) {
+			address += vma->vm_mirror;
+			vma = vma_m;
+		}
+
+		address_m = address + vma->vm_mirror;
+		pgd_m = pgd_offset(mm, address_m);
+		pud_m = pud_alloc(mm, pgd_m, address_m);
+		if (!pud_m)
+			return VM_FAULT_OOM;
+		pmd_m = pmd_alloc(mm, pud_m, address_m);
+		if (!pmd_m)
+			return VM_FAULT_OOM;
+		if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
+			return VM_FAULT_OOM;
+	}
+#endif
+
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
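
Background note (illustration only, not part of the patch): the SEGMEXEC hunks above keep two virtual mappings of the same pages coherent at the PTE level. vma->vm_mirror holds the fixed distance between a vma and its mirror, so the mirrored address is simply address + vma->vm_mirror, and pax_mirror_fault replays every fault's PTE update at that second address. The small userspace C sketch below only illustrates the underlying idea of "one physical page, two virtual addresses": it maps one shared page at two addresses with mmap(), so a store through one mapping is visible through the other without any copy, which is the property the kernel code maintains for mirrored vmas. The program, its object name "/mirror-demo", and the use of POSIX shm_open() are illustrative assumptions and do not come from this kernel tree; build with "cc mirror_demo.c -lrt".

/* mirror_demo.c - hypothetical userspace analogy for VM_MIRROR mirroring */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = shm_open("/mirror-demo", O_CREAT | O_RDWR, 0600);

	if (fd < 0 || ftruncate(fd, page) < 0) {
		perror("shm_open/ftruncate");
		return 1;
	}

	/* two independent virtual mappings backed by the same physical page */
	char *lower = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	char *upper = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (lower == MAP_FAILED || upper == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	strcpy(lower, "written through the lower mapping");
	printf("upper mapping sees: \"%s\"\n", upper);	/* same data, other address */

	munmap(lower, page);
	munmap(upper, page);
	close(fd);
	shm_unlink("/mirror-demo");
	return 0;
}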