--- zzzz-none-000/linux-3.10.107/include/linux/swapops.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/linux/swapops.h	2021-02-04 17:41:59.000000000 +0000
@@ -54,7 +54,7 @@
 /* check whether a pte points to a swap entry */
 static inline int is_swap_pte(pte_t pte)
 {
-	return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
+	return !pte_none(pte) && !pte_present(pte);
 }
 #endif
 
@@ -66,7 +66,8 @@
 {
 	swp_entry_t arch_entry;
 
-	BUG_ON(pte_file(pte));
+	if (pte_swp_soft_dirty(pte))
+		pte = pte_swp_clear_soft_dirty(pte);
 	arch_entry = __pte_to_swp_entry(pte);
 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
@@ -80,7 +81,6 @@
 	swp_entry_t arch_entry;
 
 	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
-	BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
 	return __swp_entry_to_pte(arch_entry);
 }
 
@@ -135,9 +135,12 @@
 	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
 }
 
+extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+					spinlock_t *ptl);
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address);
-extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma,
+		struct mm_struct *mm, pte_t *pte);
 #else
 
 #define make_migration_entry(page, write) swp_entry(0, 0)
@@ -147,10 +150,12 @@
 }
 #define migration_entry_to_page(swp) NULL
 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+					spinlock_t *ptl) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					 unsigned long address) { }
-static inline void migration_entry_wait_huge(struct mm_struct *mm,
-					pte_t *pte) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+		struct mm_struct *mm, pte_t *pte) { }
 static inline int is_write_migration_entry(swp_entry_t entry)
 {
 	return 0;
@@ -159,6 +164,9 @@
 #endif
 
 #ifdef CONFIG_MEMORY_FAILURE
+
+extern atomic_long_t num_poisoned_pages __read_mostly;
+
 /*
  * Support for hardware poisoned pages
  */
@@ -172,6 +180,31 @@
 {
 	return swp_type(entry) == SWP_HWPOISON;
 }
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+	return TestSetPageHWPoison(page);
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+	atomic_long_inc(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_dec(void)
+{
+	atomic_long_dec(&num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_add(long num)
+{
+	atomic_long_add(num, &num_poisoned_pages);
+}
+
+static inline void num_poisoned_pages_sub(long num)
+{
+	atomic_long_sub(num, &num_poisoned_pages);
+}
 #else
 
 static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -183,6 +216,15 @@
 {
 	return 0;
 }
+
+static inline bool test_set_page_hwpoison(struct page *page)
+{
+	return false;
+}
+
+static inline void num_poisoned_pages_inc(void)
+{
+}
 #endif
 
 #if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)