--- zzzz-none-000/linux-2.6.39.4/mm/memory.c	2011-08-03 19:43:28.000000000 +0000
+++ puma6-atom-6490-729/linux-2.6.39.4/mm/memory.c	2021-11-10 13:38:18.000000000 +0000
@@ -38,6 +38,13 @@
  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
  */
 
+/******************************************************************
+
+ Includes Intel Corporation's changes/modifications dated: 01/2013.
+ Changed/modified portions - Copyright(c) 2013, Intel Corporation.
+
+******************************************************************/
+
 #include <linux/kernel_stat.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
@@ -730,7 +737,7 @@
 		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
 		   unsigned long addr, unsigned long end)
 {
-	pte_t *orig_src_pte, *orig_dst_pte;
+	pte_t *orig_src_pte __attribute__((unused)), *orig_dst_pte __attribute__((unused));
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress = 0;
@@ -1253,6 +1260,16 @@
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 /**
  * follow_page - look up a page descriptor from a user-virtual address
  * @vma: vm_area_struct mapping @address
@@ -1335,7 +1352,7 @@
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
 		goto unlock;
 
 	page = vm_normal_page(vma, address, pte);
@@ -1625,7 +1642,7 @@
 		 */
 		if ((ret & VM_FAULT_WRITE) &&
 		    !(vma->vm_flags & VM_WRITE))
-			foll_flags &= ~FOLL_WRITE;
+			foll_flags |= FOLL_COW;
 
 		cond_resched();
 	}
@@ -3657,6 +3674,9 @@
 	return len;
 }
 
+#ifdef CONFIG_ARCH_GEN3
+EXPORT_SYMBOL_GPL(generic_access_phys);
+#endif
 #endif
 
 /*
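
Note on the FOLL_COW hunks above: they carry the same gating logic as the upstream fix for CVE-2016-5195 ("Dirty COW"), backported here onto 2.6.39.4. The standalone program below is only a minimal sketch of that predicate; the FOLL_WRITE/FOLL_FORCE/FOLL_COW values and struct fake_pte are illustrative stand-ins invented for the sketch (the real FOLL_* bits and pte helpers live in the kernel headers), and can_follow_write() merely mirrors the decision made by can_follow_write_pte() in the patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values for this sketch only. */
#define FOLL_WRITE 0x01   /* caller wants write access               */
#define FOLL_FORCE 0x02   /* caller may override VM protections      */
#define FOLL_COW   0x04   /* set once a COW write fault has occurred */

/* Simplified stand-in for a pte: models pte_write() and pte_dirty(). */
struct fake_pte {
	bool writable;
	bool dirty;
};

/* Mirrors can_follow_write_pte(): a write through an unwritable pte is
 * honoured only for forced accesses that already went through a COW
 * cycle and therefore map a private, dirty copy of the page. */
static bool can_follow_write(struct fake_pte pte, unsigned int flags)
{
	return pte.writable ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte.dirty);
}

int main(void)
{
	struct fake_pte shared_ro = { .writable = false, .dirty = false };
	struct fake_pte cow_copy  = { .writable = false, .dirty = true  };

	/* Forced write before any COW cycle: refused (prints 0). */
	printf("pre-COW forced write allowed:  %d\n",
	       can_follow_write(shared_ro, FOLL_WRITE | FOLL_FORCE));

	/* Forced write after COW (FOLL_COW set, pte dirty): allowed (prints 1). */
	printf("post-COW forced write allowed: %d\n",
	       can_follow_write(cow_copy, FOLL_WRITE | FOLL_FORCE | FOLL_COW));

	return 0;
}

The design point of the patched hunks is that __get_user_pages() no longer drops FOLL_WRITE after breaking COW; it records that fact in FOLL_COW instead, so follow_page() can tell a legitimately COWed private page (dirty pte) apart from the original read-only mapping, closing the race in which the original page could be written through a forced mapping.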