--- zzzz-none-000/linux-4.4.60/arch/arm/mm/dma-mapping.c	2017-04-08 07:53:53.000000000 +0000
+++ honeybee-1240e-714/linux-4.4.60/arch/arm/mm/dma-mapping.c	2019-07-03 09:21:34.000000000 +0000
@@ -225,7 +225,8 @@
 	return mask;
 }
 
-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size,
+			       struct dma_attrs *attrs)
 {
 	/*
 	 * Ensure that the allocated pages are zeroed, and that any data
@@ -236,7 +237,8 @@
 		phys_addr_t end = base + size;
 		while (size > 0) {
 			void *ptr = kmap_atomic(page);
-			memset(ptr, 0, PAGE_SIZE);
+			if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+				memset(ptr, 0, PAGE_SIZE);
 			dmac_flush_range(ptr, ptr + PAGE_SIZE);
 			kunmap_atomic(ptr);
 			page++;
@@ -245,7 +247,8 @@
 		outer_flush_range(base, end);
 	} else {
 		void *ptr = page_address(page);
-		memset(ptr, 0, size);
+		if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+			memset(ptr, 0, size);
 		dmac_flush_range(ptr, ptr + size);
 		outer_flush_range(__pa(ptr), __pa(ptr) + size);
 	}
@@ -271,7 +274,7 @@
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
 
-	__dma_clear_buffer(page, size);
+	__dma_clear_buffer(page, size, NULL);
 
 	return page;
 }
@@ -293,7 +296,8 @@
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller, bool want_vaddr);
+				     const void *caller, bool want_vaddr,
+				     struct dma_attrs *attrs);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				 pgprot_t prot, struct page **ret_page,
@@ -361,7 +365,7 @@
 
 	if (dev_get_cma_area(NULL))
 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
-					      &page, atomic_pool_init, true);
+					      &page, atomic_pool_init, true, NULL);
 	else
 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
 					   &page, atomic_pool_init, true);
@@ -530,7 +534,8 @@
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
-				     const void *caller, bool want_vaddr)
+				     const void *caller, bool want_vaddr,
+				     struct dma_attrs *attrs)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
@@ -541,7 +546,12 @@
 	if (!page)
 		return NULL;
 
-	__dma_clear_buffer(page, size);
+	/*
+	 * skip completely if we neither need to zero nor sync.
+	 */
+	if (!(dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) &&
+	      dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)))
+		__dma_clear_buffer(page, size, attrs);
 
 	if (!want_vaddr)
 		goto out;
@@ -591,7 +601,7 @@
 #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
 #define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, a)	NULL
 #define __free_from_pool(cpu_addr, size)			0
 #define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
@@ -653,7 +663,7 @@
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
 		addr = __alloc_from_contiguous(dev, size, prot, &page,
-					       caller, want_vaddr);
+					       caller, want_vaddr, attrs);
 	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!gfpflags_allow_blocking(gfp))
@@ -1146,7 +1156,7 @@
 		if (!page)
 			goto error;
 
-		__dma_clear_buffer(page, size);
+		__dma_clear_buffer(page, size, NULL);
 
 		for (i = 0; i < count; i++)
 			pages[i] = page + i;
@@ -1190,7 +1200,7 @@
 				pages[i + j] = pages[i] + j;
 		}
 
-		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, NULL);
 		i += 1 << order;
 		count -= 1 << order;
 	}
@@ -2140,6 +2150,7 @@
 	set_dma_ops(dev, dma_ops);
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
 
 void arch_teardown_dma_ops(struct device *dev)
 {
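The hunks above thread the caller's struct dma_attrs down into __dma_clear_buffer(), so DMA_ATTR_SKIP_ZEROING suppresses the memset and, together with DMA_ATTR_SKIP_CPU_SYNC, lets __alloc_from_contiguous() skip the clear/flush pass entirely; internal callers that have no attrs pass NULL and keep the old behaviour. A minimal caller sketch against the 4.4-era dma_attrs API: it assumes the vendor tree defines DMA_ATTR_SKIP_ZEROING in its dma-attrs.h (it is not a mainline attribute), and example_alloc_unzeroed() is an illustrative name, not part of this patch.

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/gfp.h>

/*
 * Illustrative only: allocate a CMA buffer that the caller will fully
 * initialise itself, so the allocator's zeroing and CPU cache
 * maintenance in __dma_clear_buffer() can be skipped.
 */
static void *example_alloc_unzeroed(struct device *dev, size_t size,
				    dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	/*
	 * DMA_ATTR_SKIP_CPU_SYNC is a standard 4.4 attribute;
	 * DMA_ATTR_SKIP_ZEROING is assumed to exist in this vendor tree.
	 */
	dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}

Skipping the zero/flush step is only safe when the caller overwrites the whole buffer and performs any needed cache maintenance itself before the device touches it; otherwise stale data may leak to userspace or the device.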