--- zzzz-none-000/linux-3.10.107/include/linux/rmap.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/linux/rmap.h	2021-02-04 17:41:59.000000000 +0000
@@ -37,6 +37,16 @@
 	atomic_t refcount;
 
 	/*
+	 * Count of child anon_vmas and VMAs which points to this anon_vma.
+	 *
+	 * This counter is used for making decision about reusing anon_vma
+	 * instead of forking new one. See comments in function anon_vma_clone.
+	 */
+	unsigned degree;
+
+	struct anon_vma *parent;	/* Parent of this anon_vma */
+
+	/*
 	 * NOTE: the LSB of the rb_root.rb_node is set by
 	 * mm_take_all_locks() _after_ taking the above lock. So the
 	 * rb_root must only be read/written after taking the above lock
@@ -72,14 +82,16 @@
 };
 
 enum ttu_flags {
-	TTU_UNMAP = 0,			/* unmap mode */
-	TTU_MIGRATION = 1,		/* migration mode */
-	TTU_MUNLOCK = 2,		/* munlock mode */
-	TTU_ACTION_MASK = 0xff,
+	TTU_UNMAP = 1,			/* unmap mode */
+	TTU_MIGRATION = 2,		/* migration mode */
+	TTU_MUNLOCK = 4,		/* munlock mode */
 
 	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
 	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
 	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+	TTU_BATCH_FLUSH = (1 << 11),	/* Batch TLB flushes where possible
+					 * and caller guarantees they will
+					 * do a final flush if necessary */
 };
 
 #ifdef CONFIG_MMU
@@ -96,28 +108,6 @@
 	__put_anon_vma(anon_vma);
 }
 
-static inline struct anon_vma *page_anon_vma(struct page *page)
-{
-	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
-					    PAGE_MAPPING_ANON)
-		return NULL;
-	return page_rmapping(page);
-}
-
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		up_write(&anon_vma->root->rwsem);
-}
-
 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
 	down_write(&anon_vma->root->rwsem);
@@ -151,7 +141,7 @@
 static inline void anon_vma_merge(struct vm_area_struct *vma,
 		struct vm_area_struct *next)
 {
-	VM_BUG_ON(vma->anon_vma != next->anon_vma);
+	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
 	unlink_anon_vmas(next);
 }
 
@@ -183,17 +173,13 @@
  */
 int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
-int page_referenced_one(struct page *, struct vm_area_struct *,
-	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
 
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
-int try_to_unmap_one(struct page *, struct vm_area_struct *,
-			unsigned long address, enum ttu_flags flags);
 
 /*
- * Called from mm/filemap_xip.c to unmap empty zero page
+ * Used by uprobes to replace a userspace page safely
  */
 pte_t *__page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);
@@ -236,10 +222,24 @@
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
 /*
- * Called by migrate.c to remove migration ptes, but might be used more later.
- */
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
-		struct vm_area_struct *, unsigned long, void *), void *arg);
+ * rmap_walk_control: To control rmap traversing for specific needs
+ *
+ * arg: passed to rmap_one() and invalid_vma()
+ * rmap_one: executed on each vma where page is mapped
+ * done: for checking traversing termination condition
+ * anon_lock: for getting anon_lock by optimized way rather than default
+ * invalid_vma: for skipping uninterested vma
+ */
+struct rmap_walk_control {
+	void *arg;
+	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+					unsigned long addr, void *arg);
+	int (*done)(struct page *page);
+	struct anon_vma *(*anon_lock)(struct page *page);
+	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
+};
+
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
 
 #else	/* !CONFIG_MMU */