--- zzzz-none-000/linux-4.1.38/arch/arm/mm/dma-mapping.c	2017-01-18 18:48:06.000000000 +0000
+++ bcm63-7582-715/linux-4.1.38/arch/arm/mm/dma-mapping.c	2020-11-25 10:06:48.000000000 +0000
@@ -314,7 +314,7 @@
 			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
-#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_1M
 static struct gen_pool *atomic_pool;
 
 static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
@@ -371,7 +371,11 @@
 			goto destroy_genpool;
 
 		gen_pool_set_algo(atomic_pool,
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+				gen_pool_best_fit,
+#else
 				gen_pool_first_fit_order_align,
+#endif
 				(void *)PAGE_SHIFT);
 		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
 			atomic_pool_size / 1024);
@@ -594,6 +598,14 @@
 
 #endif	/* CONFIG_MMU */
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+static void dmac_flush_area(const void *addr, size_t len, int dir)
+{
+	dmac_flush_range(addr, addr + len);
+}
+
+#endif
+
 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 				   struct page **ret_page)
 {
@@ -629,7 +641,11 @@
 	if (!mask)
 		return NULL;
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	if ((mask < 0xffffffffULL) && !(gfp & GFP_ACP))
+#else
 	if (mask < 0xffffffffULL)
+#endif
 		gfp |= GFP_DMA;
 
 	/*
@@ -882,6 +898,29 @@
 	}
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+void ___dma_page_cpu_to_dev_flush(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+#ifdef CONFIG_OUTER_CACHE
+	unsigned long paddr;
+
+	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+
+	paddr = page_to_phys(page) + off;
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_flush_range(paddr, paddr + size);
+	}
+#endif
+
+	dma_cache_maint_page(page, off, size, dir, &dmac_flush_area);
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev_flush);
+
+#endif
+
 /**
  * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
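
Reviewer note (not part of the patch): under CONFIG_BCM_KF_ARM_BCM963XX this change quadruples the atomic coherent pool from 256 KiB to 1 MiB, switches the genpool allocator from order-aligned first-fit to best-fit (presumably to limit fragmentation in the larger pool), makes GFP_DMA selection skip allocations already tagged with the out-of-tree GFP_ACP flag when CONFIG_BCM_ZONE_ACP is enabled, and exports a new helper, ___dma_page_cpu_to_dev_flush(), which unconditionally cleans and invalidates the covered cache lines (dmac_flush_range() applied per highmem-safe chunk by dma_cache_maint_page(), plus outer-cache maintenance) instead of the direction-dependent maintenance done by the stock ___dma_page_cpu_to_dev().

A minimal usage sketch for the exported helper follows. The patch does not appear to add a header prototype, so the caller declares it locally; the function and variable names in the sketch are illustrative, not taken from the Broadcom tree:

	#include <linux/types.h>
	#include <linux/mm.h>
	#include <linux/dma-direction.h>

	/* Exported by the patched arch/arm/mm/dma-mapping.c above. */
	extern void ___dma_page_cpu_to_dev_flush(struct page *page,
						 unsigned long off, size_t size,
						 enum dma_data_direction dir);

	/*
	 * Hypothetical driver helper: fully flush (clean + invalidate) a
	 * buffer within one page before handing it to a device that both
	 * reads and writes it, so stale lines can neither mask device
	 * writes nor leak dirty CPU data back after the transfer.
	 */
	static void example_flush_for_device(struct page *page, size_t len)
	{
		___dma_page_cpu_to_dev_flush(page, 0, len, DMA_BIDIRECTIONAL);
	}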