--- zzzz-none-000/linux-3.10.107/arch/arm/include/asm/cacheflush.h	2017-06-27 09:49:32.000000000 +0000
+++ vr9-7490-729/linux-3.10.107/arch/arm/include/asm/cacheflush.h	2021-11-10 11:53:52.000000000 +0000
@@ -94,6 +94,21 @@
  *	DMA Cache Coherency
  *	===================
  *
+ *	dma_inv_range(start, end)
+ *
+ *		Invalidate (discard) the specified virtual address range.
+ *		May not write back any entries.  If 'start' or 'end'
+ *		are not cache line aligned, those lines must be written
+ *		back.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	dma_clean_range(start, end)
+ *
+ *		Clean (write back) the specified virtual address range.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -115,6 +130,10 @@
 	void (*dma_map_area)(const void *, size_t, int);
 	void (*dma_unmap_area)(const void *, size_t, int);
 
+#if defined(CONFIG_ARCH_IPQ806X) || defined(CONFIG_ARCH_IPQ806X_DT)
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
+#endif /* AVM: TODO: Notwendigkeit prüfen */
 	void (*dma_flush_range)(const void *, const void *);
 };
 
@@ -123,6 +142,8 @@
  */
 #ifdef MULTI_CACHE
 
+// AVM4080 we have got multicache!
+
 extern struct cpu_cache_fns cpu_cache;
 
 #define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
@@ -142,6 +163,10 @@
  */
 #define dmac_map_area			cpu_cache.dma_map_area
 #define dmac_unmap_area			cpu_cache.dma_unmap_area
+#if defined(CONFIG_ARCH_IPQ806X) || defined(CONFIG_ARCH_IPQ806X_DT)
+#define dmac_inv_range			cpu_cache.dma_inv_range
+#define dmac_clean_range		cpu_cache.dma_clean_range
+#endif /* AVM: TODO: Notwendigkeit prüfen */
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -229,7 +254,8 @@
 }
 
 static inline void
-vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
@@ -239,7 +265,8 @@
 }
 
 static inline void
-vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
+		      unsigned long pfn __maybe_unused)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
@@ -344,7 +371,8 @@
  * data, we need to do a full cache flush to ensure that writebacks
  * don't corrupt data placed into these pages via the new mappings.
  */
-static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+static inline void flush_cache_vmap(unsigned long start __maybe_unused,
+				    unsigned long end __maybe_unused)
 {
 	if (!cache_is_vipt_nonaliasing())
 		flush_cache_all();
@@ -356,7 +384,8 @@
 		dsb();
 }
 
-static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+static inline void flush_cache_vunmap(unsigned long start __maybe_unused,
+				      unsigned long end __maybe_unused )
 {
 	if (!cache_is_vipt_nonaliasing())
 		flush_cache_all();