--- zzzz-none-000/linux-4.1.52/include/linux/mm.h	2018-05-28 02:26:45.000000000 +0000
+++ bcm63-7530ax-731/linux-4.1.52/include/linux/mm.h	2022-03-02 11:37:13.000000000 +0000
@@ -28,6 +28,11 @@
 struct user_struct;
 struct writeback_control;
 
+#if defined(CONFIG_AVM_PAGE_TRACE)
+unsigned long avm_get_page_current_pc(struct page *page);
+void avm_set_page_current_pc(struct page *page, unsigned long pc);
+#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
 
@@ -401,7 +406,7 @@
 
 extern void kvfree(const void *addr);
 
-static inline void compound_lock(struct page *page)
+static inline void compound_lock(struct page *page __maybe_unused)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	VM_BUG_ON_PAGE(PageSlab(page), page);
@@ -409,7 +414,7 @@
 #endif
 }
 
-static inline void compound_unlock(struct page *page)
+static inline void compound_unlock(struct page *page __maybe_unused)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	VM_BUG_ON_PAGE(PageSlab(page), page);
@@ -417,7 +422,8 @@
 #endif
 }
 
-static inline unsigned long compound_lock_irqsave(struct page *page)
+static inline unsigned long compound_lock_irqsave(
+					struct page *page __maybe_unused)
 {
 	unsigned long uninitialized_var(flags);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -427,8 +433,9 @@
 	return flags;
 }
 
-static inline void compound_unlock_irqrestore(struct page *page,
-					      unsigned long flags)
+static inline void compound_unlock_irqrestore(struct page *page __maybe_unused,
+					      unsigned long flags
+					      __maybe_unused)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	compound_unlock(page);
@@ -568,7 +575,11 @@
 void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
-void split_page(struct page *page, unsigned int order);
+void split_page(struct page *page, unsigned int order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		, unsigned long pc
+#endif
+		);
 int split_free_page(struct page *page);
 
 /*
@@ -744,7 +755,7 @@
 	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 }
 
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(struct zone *zone __maybe_unused)
 {
 #ifdef CONFIG_NUMA
 	return zone->node;
@@ -830,7 +841,7 @@
 }
 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 #else /* !CONFIG_NUMA_BALANCING */
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid __maybe_unused)
 {
 	return page_to_nid(page); /* XXX */
 }
@@ -840,36 +851,36 @@
 	return page_to_nid(page); /* XXX */
 }
 
-static inline int cpupid_to_nid(int cpupid)
+static inline int cpupid_to_nid(int cpupid __maybe_unused)
 {
 	return -1;
 }
 
-static inline int cpupid_to_pid(int cpupid)
+static inline int cpupid_to_pid(int cpupid __maybe_unused)
 {
 	return -1;
 }
 
-static inline int cpupid_to_cpu(int cpupid)
+static inline int cpupid_to_cpu(int cpupid __maybe_unused)
 {
 	return -1;
 }
 
-static inline int cpu_pid_to_cpupid(int nid, int pid)
+static inline int cpu_pid_to_cpupid(int nid __maybe_unused, int pid __maybe_unused)
 {
 	return -1;
 }
 
-static inline bool cpupid_pid_unset(int cpupid)
+static inline bool cpupid_pid_unset(int cpupid __maybe_unused)
 {
 	return 1;
 }
 
-static inline void page_cpupid_reset_last(struct page *page)
+static inline void page_cpupid_reset_last(struct page *page __maybe_unused)
 {
 }
 
-static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
+static inline bool cpupid_match_pid(struct task_struct *task __maybe_unused, int cpupid __maybe_unused)
 {
 	return false;
 }
@@ -906,7 +917,7 @@
 }
 
 static inline void set_page_links(struct page *page, enum zone_type zone,
-	unsigned long node, unsigned long pfn)
+	unsigned long node, unsigned long pfn __maybe_unused)
 {
 	set_page_zone(page, zone);
 	set_page_node(page, node);
@@ -1080,7 +1091,7 @@
 #ifdef CONFIG_SHMEM
 bool shmem_mapping(struct address_space *mapping);
 #else
-static inline bool shmem_mapping(struct address_space *mapping)
+static inline bool shmem_mapping(struct address_space *mapping __maybe_unused)
 {
 	return false;
 }
@@ -1182,17 +1193,17 @@
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			unsigned long address, unsigned int fault_flags);
 #else
-static inline int handle_mm_fault(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags)
+static inline int handle_mm_fault(struct mm_struct *mm __maybe_unused,
+		struct vm_area_struct *vma __maybe_unused, unsigned long address __maybe_unused,
+		unsigned int flags __maybe_unused)
 {
 	/* should never happen if there's no MMU */
 	BUG();
 	return VM_FAULT_SIGBUS;
 }
-static inline int fixup_user_fault(struct task_struct *tsk,
-		struct mm_struct *mm, unsigned long address,
-		unsigned int fault_flags)
+static inline int fixup_user_fault(struct task_struct *tsk __maybe_unused,
+		struct mm_struct *mm __maybe_unused, unsigned long address __maybe_unused,
+		unsigned int fault_flags __maybe_unused)
 {
 	/* should never happen if there's no MMU */
 	BUG();
@@ -1346,7 +1357,7 @@
 #if defined(SPLIT_RSS_COUNTING)
 void sync_mm_rss(struct mm_struct *mm);
 #else
-static inline void sync_mm_rss(struct mm_struct *mm)
+static inline void sync_mm_rss(struct mm_struct *mm __maybe_unused)
 {
 }
 #endif
@@ -1364,8 +1375,9 @@
 }
 
 #ifdef __PAGETABLE_PUD_FOLDED
-static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
-						unsigned long address)
+static inline int __pud_alloc(struct mm_struct *mm __maybe_unused,
+			      pgd_t *pgd __maybe_unused,
+			      unsigned long address __maybe_unused)
 {
 	return 0;
 }
@@ -1374,21 +1386,22 @@
 #endif
 
 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
-						unsigned long address)
+static inline int __pmd_alloc(struct mm_struct *mm __maybe_unused,
+			      pud_t *pud __maybe_unused,
+			      unsigned long address __maybe_unused)
 {
 	return 0;
 }
 
-static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+static inline void mm_nr_pmds_init(struct mm_struct *mm __maybe_unused) {}
 
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+static inline unsigned long mm_nr_pmds(struct mm_struct *mm __maybe_unused)
 {
 	return 0;
 }
 
-static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
-static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+static inline void mm_inc_nr_pmds(struct mm_struct *mm __maybe_unused) {}
+static inline void mm_dec_nr_pmds(struct mm_struct *mm __maybe_unused) {}
 #else
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
 
@@ -1451,12 +1464,12 @@
 {
 }
 
-static inline bool ptlock_alloc(struct page *page)
+static inline bool ptlock_alloc(struct page *page __maybe_unused)
 {
 	return true;
 }
 
-static inline void ptlock_free(struct page *page)
+static inline void ptlock_free(struct page *page __maybe_unused)
 {
 }
 
@@ -1466,7 +1479,7 @@
 }
 #endif /* ALLOC_SPLIT_PTLOCKS */
 
-static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm __maybe_unused, pmd_t *pmd)
 {
 	return ptlock_ptr(pmd_page(*pmd));
 }
@@ -1499,13 +1512,13 @@
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
-static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd __maybe_unused)
 {
 	return &mm->page_table_lock;
 }
 static inline void ptlock_cache_init(void) {}
-static inline bool ptlock_init(struct page *page) { return true; }
-static inline void pte_lock_deinit(struct page *page) {}
+static inline bool ptlock_init(struct page *page __maybe_unused) { return true; }
+static inline void pte_lock_deinit(struct page *page __maybe_unused) {}
 #endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static inline void pgtable_init(void)
@@ -1587,13 +1600,13 @@
 
 #else
 
-static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd __maybe_unused)
 {
 	return &mm->page_table_lock;
 }
 
-static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
-static inline void pgtable_pmd_page_dtor(struct page *page) {}
+static inline bool pgtable_pmd_page_ctor(struct page *page __maybe_unused) { return true; }
+static inline void pgtable_pmd_page_dtor(struct page *page __maybe_unused) {}
 
 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
 
@@ -1720,7 +1733,7 @@
 
 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
-static inline int __early_pfn_to_nid(unsigned long pfn)
+static inline int __early_pfn_to_nid(unsigned long pfn __maybe_unused)
 {
 	return 0;
 }
@@ -1859,7 +1872,7 @@
 	(void) __mm_populate(addr, len, 1);
 }
 #else
-static inline void mm_populate(unsigned long addr, unsigned long len) {}
+static inline void mm_populate(unsigned long addr __maybe_unused, unsigned long len __maybe_unused) {}
#endif
 
 /* These take the mm semaphore themselves */
@@ -1916,7 +1929,15 @@
 void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
+#if defined(CONFIG_AVM_KERNEL)
+#define VM_MAX_READAHEAD	1024	/* kbytes */
+#else
+#if (defined(CONFIG_BCM_KF_USB_STORAGE) && defined(CONFIG_MIPS_BCM963XX))
+#define VM_MAX_READAHEAD	512	/* kbytes */
+#else
 #define VM_MAX_READAHEAD	128	/* kbytes */
+#endif
+#endif
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -2011,7 +2032,7 @@
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
 void vma_set_page_prot(struct vm_area_struct *vma);
 #else
-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags __maybe_unused)
 {
 	return __pgprot(0);
 }
@@ -2072,7 +2093,7 @@
 void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
 #else
 static inline void vm_stat_account(struct mm_struct *mm,
-			unsigned long flags, struct file *file, long pages)
+			unsigned long flags __maybe_unused, struct file *file __maybe_unused, long pages)
 {
 	mm->total_vm += pages;
 }
@@ -2100,7 +2121,8 @@
 #endif /* CONFIG_HIBERNATION */
 #else
 static inline void
-kernel_map_pages(struct page *page, int numpages, int enable) {}
+kernel_map_pages(struct page *page __maybe_unused, int numpages __maybe_unused,
+		 int enable __maybe_unused) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
@@ -2111,12 +2133,12 @@
 extern int in_gate_area_no_mm(unsigned long addr);
 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
 #else
-static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm __maybe_unused)
 {
 	return NULL;
 }
-static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
-static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
+static inline int in_gate_area_no_mm(unsigned long addr __maybe_unused) { return 0; }
+static inline int in_gate_area(struct mm_struct *mm __maybe_unused, unsigned long addr __maybe_unused)
 {
 	return 0;
 }
@@ -2219,7 +2241,7 @@
 #else
 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool page_is_guard(struct page *page __maybe_unused) { return false; }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 #if MAX_NUMNODES > 1
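
Editor's note: the patch above only declares the CONFIG_AVM_PAGE_TRACE hooks and widens split_page(); their definitions presumably live elsewhere in the AVM tree (page_alloc.c and the vendor's struct page layout), which this header does not show. Below is a minimal illustrative sketch, not code from the patch, of how the hooks and the extra split_page() parameter might plausibly be wired up. The page->avm_pc backing field and the example_split() wrapper are assumptions for illustration; _RET_IP_ is the stock helper from linux/kernel.h.

#include <linux/kernel.h>	/* _RET_IP_ */
#include <linux/mm.h>

#if defined(CONFIG_AVM_PAGE_TRACE)
/*
 * Hypothetical backing implementation: record in each struct page the
 * program counter of whoever allocated or split it, so a leak report
 * can attribute pages to call sites. "avm_pc" is an ASSUMED vendor
 * field, not shown in this header.
 */
unsigned long avm_get_page_current_pc(struct page *page)
{
	return page->avm_pc;		/* assumed field in struct page */
}

void avm_set_page_current_pc(struct page *page, unsigned long pc)
{
	page->avm_pc = pc;
}
#endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/

/*
 * Callers of the patched split_page() pass their own PC so each
 * resulting order-0 page is attributed to the splitting site:
 */
static void example_split(struct page *page, unsigned int order)
{
	split_page(page, order
#if defined(CONFIG_AVM_PAGE_TRACE)
		   , _RET_IP_	/* PC of example_split()'s caller */
#endif
		   );
}

The __maybe_unused annotations throughout the patch serve a different purpose: the no-op stub variants of these inlines ignore some or all parameters, and the attribute suppresses -Wunused-parameter noise when the vendor builds with stricter warnings, without changing any generated code.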