--- zzzz-none-000/linux-4.1.52/arch/arm/mm/dma-mapping.c	2018-05-28 02:26:45.000000000 +0000
+++ bcm63-7530ax-731/linux-4.1.52/arch/arm/mm/dma-mapping.c	2022-03-02 11:37:12.000000000 +0000
@@ -263,7 +263,11 @@
 	/*
 	 * Now split the huge page and free the excess pages
 	 */
-	split_page(page, order);
+	split_page(page, order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+		   , _RET_IP_
+#endif
+		   );
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
 
@@ -314,7 +318,7 @@
 				   VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
-#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_1M
 static struct gen_pool *atomic_pool;
 
 static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
@@ -371,7 +375,11 @@
 			goto destroy_genpool;
 
 		gen_pool_set_algo(atomic_pool,
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+				  gen_pool_best_fit,
+#else
 				  gen_pool_first_fit_order_align,
+#endif
 				  (void *)PAGE_SHIFT);
 		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
 			atomic_pool_size / 1024);
@@ -594,6 +602,14 @@
 
 #endif	/* CONFIG_MMU */
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+static void dmac_flush_area(const void * addr, size_t len, int dir)
+{
+	dmac_flush_range(addr, addr + len);
+}
+
+#endif
+
 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 				   struct page **ret_page)
 {
@@ -629,7 +645,11 @@
 	if (!mask)
 		return NULL;
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX) && defined(CONFIG_BCM_ZONE_ACP)
+	if ((mask < 0xffffffffULL) && !(gfp & GFP_ACP))
+#else
 	if (mask < 0xffffffffULL)
+#endif
 		gfp |= GFP_DMA;
 
 	/*
@@ -900,6 +920,29 @@
 	}
 }
 
+#if defined(CONFIG_BCM_KF_ARM_BCM963XX)
+void ___dma_page_cpu_to_dev_flush(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+#ifdef CONFIG_OUTER_CACHE
+	unsigned long paddr;
+
+	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+
+	paddr = page_to_phys(page) + off;
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_flush_range(paddr, paddr + size);
+	}
+#endif
+
+	dma_cache_maint_page(page, off, size, dir, &dmac_flush_area);
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev_flush);
+
+#endif
+
 /**
  * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -1192,7 +1235,11 @@
 		}
 
 		if (order) {
-			split_page(pages[i], order);
+			split_page(pages[i], order
+#if defined(CONFIG_AVM_PAGE_TRACE)
+				   , _RET_IP_
+#endif
+				   );
 			j = 1 << order;
 			while (--j)
 				pages[i + j] = pages[i] + j;
@@ -2146,6 +2193,9 @@
 
 	set_dma_ops(dev, dma_ops);
 }
+#if defined(CONFIG_BCM_KF_GLB_COHERENCY) && defined(CONFIG_BCM_GLB_COHERENCY)
+EXPORT_SYMBOL(arch_setup_dma_ops);
+#endif
 
 void arch_teardown_dma_ops(struct device *dev)
 {
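
For illustration, a minimal sketch of how a driver on a BCM963XX build might call the helper this patch exports. Only ___dma_page_cpu_to_dev_flush() and its signature come from the hunks above; bcm_push_buffer_to_device(), its parameters, and the surrounding comments are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Provided by the patched arch/arm/mm/dma-mapping.c on BCM963XX builds. */
extern void ___dma_page_cpu_to_dev_flush(struct page *page, unsigned long off,
					 size_t size, enum dma_data_direction dir);

/*
 * Hypothetical caller. Unlike the stock __dma_page_cpu_to_dev(), which
 * cleans or invalidates the inner cache according to the transfer
 * direction, the patched helper also pushes the whole range through
 * dmac_flush_range() (clean + invalidate), after the outer-cache
 * maintenance when CONFIG_OUTER_CACHE is set.
 */
static void bcm_push_buffer_to_device(struct page *page, unsigned long offset,
				      size_t len)
{
	/* Ensure the CPU's copy of [offset, offset + len) reaches memory
	 * before the device reads it. */
	___dma_page_cpu_to_dev_flush(page, offset, len, DMA_TO_DEVICE);
}

The unconditional flush presumably lets Broadcom's drivers hand pages back and forth between CPU and hardware without tracking per-direction cache-line state, at the cost of extra cache maintenance on each call.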