--- zzzz-none-000/linux-4.1.52/include/asm-generic/pgtable.h 2018-05-28 02:26:45.000000000 +0000
+++ bcm63-7530ax-731/linux-4.1.52/include/asm-generic/pgtable.h 2022-03-02 11:37:13.000000000 +0000
@@ -36,8 +36,10 @@
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
-                                            unsigned long address,
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma
+                                            __maybe_unused,
+                                            unsigned long address
+                                            __maybe_unused,
                                             pte_t *ptep)
 {
         pte_t pte = *ptep;
@@ -65,9 +67,11 @@
         return r;
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
-                                            unsigned long address,
-                                            pmd_t *pmdp)
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma
+                                            __maybe_unused,
+                                            unsigned long address
+                                            __maybe_unused,
+                                            pmd_t *pmdp __maybe_unused)
 {
         BUG();
         return 0;
@@ -86,8 +90,8 @@
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
-                                       unsigned long address,
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm __maybe_unused,
+                                       unsigned long address __maybe_unused,
                                        pte_t *ptep)
 {
         pte_t pte = *ptep;
@@ -123,7 +127,7 @@
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                             unsigned long address, pte_t *ptep,
-                                            int full)
+                                            int full __maybe_unused)
 {
         pte_t pte;
         pte = ptep_get_and_clear(mm, address, ptep);
@@ -137,10 +141,10 @@
  * not present, or in the process of an address space destruction.
  */
 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-static inline void pte_clear_not_present_full(struct mm_struct *mm,
-                                              unsigned long address,
+static inline void pte_clear_not_present_full(struct mm_struct *mm __maybe_unused,
+                                              unsigned long address __maybe_unused,
                                               pte_t *ptep,
-                                              int full)
+                                              int full __maybe_unused)
 {
         pte_clear(mm, address, ptep);
 }
@@ -160,7 +164,9 @@
 
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
 struct mm_struct;
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm __maybe_unused,
+                                      unsigned long address __maybe_unused,
+                                      pte_t *ptep)
 {
         pte_t old_pte = *ptep;
         set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
@@ -176,8 +182,9 @@
         set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
-                                      unsigned long address, pmd_t *pmdp)
+static inline void pmdp_set_wrprotect(struct mm_struct *mm __maybe_unused,
+                                      unsigned long address __maybe_unused,
+                                      pmd_t *pmdp __maybe_unused)
 {
         BUG();
 }
@@ -217,7 +224,7 @@
  * host to transparently reclaim unused pages. This function returns
  * whether the pte's page is unused.
  */
-static inline int pte_unused(pte_t pte)
+static inline int pte_unused(pte_t pte __maybe_unused)
 {
         return 0;
 }
@@ -230,7 +237,8 @@
         return pmd_val(pmd_a) == pmd_val(pmd_b);
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+static inline int pmd_same(pmd_t pmd_a __maybe_unused,
+                           pmd_t pmd_b __maybe_unused)
 {
         BUG();
         return 0;
@@ -359,8 +367,9 @@
         return ptep_get_and_clear(mm, addr, ptep);
 }
 
-static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
-                                             unsigned long addr,
+static inline void __ptep_modify_prot_commit(struct mm_struct *mm
+                                             __maybe_unused,
+                                             unsigned long addr __maybe_unused,
                                              pte_t *ptep, pte_t pte)
 {
         /*
@@ -442,37 +451,37 @@
 #endif
 
 #ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
-static inline int pte_soft_dirty(pte_t pte)
+static inline int pte_soft_dirty(pte_t pte __maybe_unused)
 {
         return 0;
 }
 
-static inline int pmd_soft_dirty(pmd_t pmd)
+static inline int pmd_soft_dirty(pmd_t pmd __maybe_unused)
 {
         return 0;
 }
 
-static inline pte_t pte_mksoft_dirty(pte_t pte)
+static inline pte_t pte_mksoft_dirty(pte_t pte __maybe_unused)
 {
         return pte;
 }
 
-static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd __maybe_unused)
 {
         return pmd;
 }
 
-static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte __maybe_unused)
 {
         return pte;
 }
 
-static inline int pte_swp_soft_dirty(pte_t pte)
+static inline int pte_swp_soft_dirty(pte_t pte __maybe_unused)
 {
         return 0;
 }
 
-static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte __maybe_unused)
 {
         return pte;
 }
@@ -489,9 +498,9 @@
  * track_pfn_remap is called when a _new_ pfn mapping is being established
  * by remap_pfn_range() for physical range indicated by pfn and size.
  */
-static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
-                                  unsigned long pfn, unsigned long addr,
-                                  unsigned long size)
+static inline int track_pfn_remap(struct vm_area_struct *vma __maybe_unused, pgprot_t *prot __maybe_unused,
+                                  unsigned long pfn __maybe_unused, unsigned long addr __maybe_unused,
+                                  unsigned long size __maybe_unused)
 {
         return 0;
 }
@@ -500,8 +509,8 @@
  * track_pfn_insert is called when a _new_ single pfn is established
  * by vm_insert_pfn().
  */
-static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
-                                   unsigned long pfn)
+static inline int track_pfn_insert(struct vm_area_struct *vma __maybe_unused, pgprot_t *prot __maybe_unused,
+                                   unsigned long pfn __maybe_unused)
 {
         return 0;
 }
@@ -510,7 +519,7 @@
  * track_pfn_copy is called when vma that is covering the pfnmap gets
  * copied through copy_page_range().
  */
-static inline int track_pfn_copy(struct vm_area_struct *vma)
+static inline int track_pfn_copy(struct vm_area_struct *vma __maybe_unused)
 {
         return 0;
 }
@@ -520,8 +529,8 @@
  * untrack can be called for a specific region indicated by pfn and size or
  * can be for the entire vma (in which case pfn, size are zero).
  */
-static inline void untrack_pfn(struct vm_area_struct *vma,
-                               unsigned long pfn, unsigned long size)
+static inline void untrack_pfn(struct vm_area_struct *vma __maybe_unused,
+                               unsigned long pfn __maybe_unused, unsigned long size __maybe_unused)
 {
 }
 #else
@@ -552,7 +561,7 @@
         return pfn == zero_pfn;
 }
 
-static inline unsigned long my_zero_pfn(unsigned long addr)
+static inline unsigned long my_zero_pfn(unsigned long addr __maybe_unused)
 {
         extern unsigned long zero_pfn;
         return zero_pfn;
@@ -562,16 +571,16 @@
 #ifdef CONFIG_MMU
 
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_trans_huge(pmd_t pmd)
+static inline int pmd_trans_huge(pmd_t pmd __maybe_unused)
 {
         return 0;
 }
-static inline int pmd_trans_splitting(pmd_t pmd)
+static inline int pmd_trans_splitting(pmd_t pmd __maybe_unused)
 {
         return 0;
 }
 #ifndef __HAVE_ARCH_PMD_WRITE
-static inline int pmd_write(pmd_t pmd)
+static inline int pmd_write(pmd_t pmd __maybe_unused)
 {
         BUG();
         return 0;
@@ -666,7 +675,7 @@
  * become null, but then a page fault can map in a THP and not a
  * regular page).
  */
-static inline int pmd_trans_unstable(pmd_t *pmd)
+static inline int pmd_trans_unstable(pmd_t *pmd __maybe_unused)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
         return pmd_none_or_trans_huge_or_clear_bad(pmd);
@@ -684,12 +693,12 @@
  * is the responsibility of the caller to distinguish between PROT_NONE
  * protections and NUMA hinting fault protections.
  */
-static inline int pte_protnone(pte_t pte)
+static inline int pte_protnone(pte_t pte __maybe_unused)
 {
         return 0;
 }
 
-static inline int pmd_protnone(pmd_t pmd)
+static inline int pmd_protnone(pmd_t pmd __maybe_unused)
 {
         return 0;
 }
@@ -703,19 +712,19 @@
 int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
 #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+static inline int pud_set_huge(pud_t *pud __maybe_unused, phys_addr_t addr __maybe_unused, pgprot_t prot __maybe_unused)
 {
         return 0;
 }
-static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+static inline int pmd_set_huge(pmd_t *pmd __maybe_unused, phys_addr_t addr __maybe_unused, pgprot_t prot __maybe_unused)
 {
         return 0;
 }
-static inline int pud_clear_huge(pud_t *pud)
+static inline int pud_clear_huge(pud_t *pud __maybe_unused)
 {
         return 0;
 }
-static inline int pmd_clear_huge(pmd_t *pmd)
+static inline int pmd_clear_huge(pmd_t *pmd __maybe_unused)
 {
         return 0;
 }
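Every hunk above makes the same kind of change: the generic fallback stubs in asm-generic/pgtable.h ignore some or all of their parameters, and the vendor tree tags those parameters with __maybe_unused so that builds enabling -Wunused-parameter (e.g. -Wextra/-Werror builds) stay quiet without changing any behaviour. The sketch below is a minimal standalone illustration of that idiom, not part of the patch: it assumes the usual kernel definition of __maybe_unused as __attribute__((unused)) (include/linux/compiler-gcc.h in a 4.1 tree) and uses a made-up stub name.

/*
 * Standalone sketch of the __maybe_unused idiom used throughout the patch.
 * The macro mirrors the kernel's definition; pte_soft_dirty_stub() is a
 * hypothetical name.  Build with: gcc -Wall -Wextra -Werror sketch.c
 */
#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/*
 * Fallback-style stub: the parameter exists only to keep the prototype
 * uniform with real implementations and is deliberately ignored.
 * Without the annotation, -Wunused-parameter would warn here and
 * -Werror would turn that warning into a build failure.
 */
static inline int pte_soft_dirty_stub(unsigned long pte __maybe_unused)
{
        return 0;
}

int main(void)
{
        printf("%d\n", pte_soft_dirty_stub(0));
        return 0;
}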