--- zzzz-none-000/linux-4.1.52/arch/arm/include/asm/cacheflush.h	2018-05-28 02:26:45.000000000 +0000
+++ bcm63-7530ax-731/linux-4.1.52/arch/arm/include/asm/cacheflush.h	2022-03-02 11:37:12.000000000 +0000
@@ -167,6 +167,21 @@
 
 #endif
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+#ifdef CONFIG_CPU_CACHE_V7
+#define __cpuc_flush_line(_addr) \
+	__asm__ __volatile__("mcr p15, 0, %0, c7, c14, 1" : : "r" (_addr))
+#define __cpuc_clean_line(_addr) \
+	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 1" : : "r" (_addr))
+#define __cpuc_inv_line(_addr) \
+	__asm__ __volatile__("mcr p15, 0, %0, c7, c6, 1" : : "r" (_addr))
+#else
+#define __cpuc_flush_line(_addr) do {} while(0)
+#define __cpuc_clean_line(_addr) do {} while(0)
+#define __cpuc_inv_line(_addr) do {} while(0)
+#endif
+#endif
+
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
@@ -239,7 +254,7 @@
 }
 
 static inline void
-vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn __maybe_unused)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
@@ -343,7 +358,7 @@
  * data, we need to do a full cache flush to ensure that writebacks
  * don't corrupt data placed into these pages via the new mappings.
  */
-static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+static inline void flush_cache_vmap(unsigned long start __maybe_unused, unsigned long end __maybe_unused)
 {
 	if (!cache_is_vipt_nonaliasing())
 		flush_cache_all();
@@ -355,7 +370,7 @@
 		dsb(ishst);
 }
 
-static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+static inline void flush_cache_vunmap(unsigned long start __maybe_unused, unsigned long end __maybe_unused)
 {
 	if (!cache_is_vipt_nonaliasing())
 		flush_cache_all();
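
Reviewer note: the three added macros wrap the ARMv7 CP15 data-cache maintenance operations by MVA (c7, c14, 1 = clean+invalidate, c7, c10, 1 = clean, c7, c6, 1 = invalidate), each acting on a single cache line. Below is a minimal usage sketch, not part of this patch: the helper name nbuff_flush_range_example(), the use of L1_CACHE_BYTES as the line size, and the trailing dsb(sy) are assumptions for illustration of how a caller on the CONFIG_BCM_KF_NBUFF path could cover a whole buffer line by line.

/*
 * Hypothetical helper (illustration only, not in this patch): clean and
 * invalidate every cache line covering [addr, addr + len), e.g. before
 * handing a packet buffer to a non-coherent DMA engine.  Assumes
 * L1_CACHE_BYTES-sized lines (asm/cache.h) and that __cpuc_flush_line()
 * operates on exactly one line per call, as the macro above suggests.
 */
static inline void nbuff_flush_range_example(unsigned long addr, unsigned long len)
{
	unsigned long end = addr + len;

	/* Align down to the start of the first cache line. */
	addr &= ~((unsigned long)L1_CACHE_BYTES - 1);
	for (; addr < end; addr += L1_CACHE_BYTES)
		__cpuc_flush_line(addr);

	dsb(sy);	/* make the maintenance visible before DMA starts */
}

On CONFIG_CPU_CACHE_V7 kernels this performs real line-granular maintenance; with the empty do {} while(0) stubs it compiles to nothing, which is presumably why the fallbacks are defined at all.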