--- zzzz-none-000/linux-3.10.107/arch/mips/mm/dma-default.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/mips/mm/dma-default.c	2021-02-04 17:41:59.000000000 +0000
@@ -16,12 +16,15 @@
 #include <linux/string.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/dma-contiguous.h>
 
 #include <asm/cache.h>
+#include
 #include <asm/cpu-type.h>
 #include <asm/io.h>
 
 #include <dma-coherence.h>
+#ifdef CONFIG_DMA_MAYBE_COHERENT
 int coherentio = 0;	/* User defined DMA coherency from command line. */
 EXPORT_SYMBOL_GPL(coherentio);
 int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
@@ -41,30 +44,7 @@
 	return 0;
 }
 early_param("nocoherentio", setnocoherentio);
-
-static inline struct page *dma_addr_to_page(struct device *dev,
-	dma_addr_t dma_addr)
-{
-	return pfn_to_page(
-		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
-{
-	return !plat_device_is_coherent(dev) &&
-	       (current_cpu_type() == CPU_R10000 ||
-		current_cpu_type() == CPU_R12000 ||
-		current_cpu_type() == CPU_BMIPS5000);
-}
+#endif
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
@@ -103,7 +83,7 @@
 	return gfp | dma_flag;
 }
 
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
@@ -119,59 +99,76 @@
 
 	return ret;
 }
-EXPORT_SYMBOL(dma_alloc_noncoherent);
 
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
+	struct page *page = NULL;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
+	/*
+	 * XXX: seems like the coherent and non-coherent implementations could
+	 * be consolidated.
+	 */
+	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+		return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
 
 	gfp = massage_gfp_flags(dev, gfp);
 
-	ret = (void *) __get_free_pages(gfp, get_order(size));
-
-	if (ret) {
-		memset(ret, 0, size);
-		*dma_handle = plat_map_dma_mem(dev, ret, size);
-
-		if (!plat_device_is_coherent(dev)) {
-			dma_cache_wback_inv((unsigned long) ret, size);
-			if (!hw_coherentio)
-				ret = UNCAC_ADDR(ret);
-		}
+	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
+		page = dma_alloc_from_contiguous(dev,
+					count, get_order(size));
+	if (!page)
+		page = alloc_pages(gfp, get_order(size));
+
+	if (!page)
+		return NULL;
+
+	ret = page_address(page);
+	memset(ret, 0, size);
+	*dma_handle = plat_map_dma_mem(dev, ret, size);
+	if (!plat_device_is_coherent(dev)) {
+		dma_cache_wback_inv((unsigned long) ret, size);
+		if (!hw_coherentio)
+			ret = UNCAC_ADDR(ret);
 	}
 
 	return ret;
 }
+EXPORT_SYMBOL(mips_dma_alloc_coherent);
 
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle)
+static void mips_dma_free_noncoherent(struct device *dev, size_t size,
+		void *vaddr, dma_addr_t dma_handle)
 {
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_noncoherent);
 
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
-	int order = get_order(size);
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = NULL;
 
-	if (dma_release_from_coherent(dev, order, vaddr))
+	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
 		return;
+	}
 
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
 	if (!plat_device_is_coherent(dev) && !hw_coherentio)
 		addr = CAC_ADDR(addr);
 
-	free_pages(addr, get_order(size));
+	page = virt_to_page((void *) addr);
+
+	if (!dma_release_from_contiguous(dev, page, count))
+		__free_pages(page, get_order(size));
 }
+EXPORT_SYMBOL(mips_dma_free_coherent);
 
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
@@ -200,7 +197,7 @@
  * If highmem is not configured then the bulk of this loop gets
  * optimized out.
  */
-static inline void __dma_sync(struct page *page,
+void __dma_sync(struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction direction)
 {
 	size_t left = size;
@@ -230,140 +227,23 @@
 		left -= len;
 	} while (left);
 }
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
-	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_addr),
-			   dma_addr & ~PAGE_MASK, size, direction);
-
-	plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
-	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
-				  sg->offset;
-	}
-
-	return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(page, offset, size, direction);
-
-	return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-	int nhwentries, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nhwentries; i++, sg++) {
-		if (!plat_device_is_coherent(dev) &&
-		    direction != DMA_TO_DEVICE)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
-	}
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			   dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	plat_extra_sync_for_device(dev);
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			   dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (cpu_needs_post_dma_flush(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-	}
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-	}
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return plat_dma_mapping_error(dev, dma_addr);
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
-	return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 
-	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev))
 		__dma_sync_virtual(vaddr, size, direction);
 }
 
 EXPORT_SYMBOL(dma_cache_sync);
 
-static struct dma_map_ops mips_default_dma_map_ops = {
-	.alloc = mips_dma_alloc_coherent,
-	.free = mips_dma_free_coherent,
-	.map_page = mips_dma_map_page,
-	.unmap_page = mips_dma_unmap_page,
-	.map_sg = mips_dma_map_sg,
-	.unmap_sg = mips_dma_unmap_sg,
-	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
-	.sync_single_for_device = mips_dma_sync_single_for_device,
-	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = mips_dma_sync_sg_for_device,
-	.mapping_error = mips_dma_mapping_error,
-	.dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
 EXPORT_SYMBOL(mips_dma_map_ops);
+#endif
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
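
With mips_default_dma_map_ops removed and mips_dma_map_ops left NULL under CONFIG_SYS_HAS_DMA_OPS, a platform in this tree now has to install its own struct dma_map_ops; the patch exports mips_dma_alloc_coherent(), mips_dma_free_coherent() and __dma_sync() precisely so that platform ops can reuse them. A minimal sketch of such a registration follows; the my_plat_* names and initcall are hypothetical, and the extern prototypes are copied from the patched file (they would normally live in an asm header):

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <dma-coherence.h>	/* plat_* helpers, as used in dma-default.c */

/* Exported by the patched dma-default.c above. */
extern void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs);
extern void mips_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs);
extern void __dma_sync(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction direction);
extern struct dma_map_ops *mips_dma_map_ops;

/* Hypothetical map_page callback: flush the CPU cache, then translate. */
static dma_addr_t my_plat_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, dir);

	return plat_map_dma_mem_page(dev, page) + offset;
}

static struct dma_map_ops my_plat_dma_ops = {
	.alloc		= mips_dma_alloc_coherent,
	.free		= mips_dma_free_coherent,
	.map_page	= my_plat_map_page,
	/* unmap_page, map_sg, sync_* etc. would be filled in likewise. */
};

static int __init my_plat_dma_init(void)
{
	mips_dma_map_ops = &my_plat_dma_ops;	/* replaces the old default */
	return 0;
}
arch_initcall(my_plat_dma_init);

The mips_default_dma_map_ops block removed at the end of the diff shows the full callback set such platform ops previously covered.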
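On the consumer side, the new dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) branch in mips_dma_alloc_coherent() is reached through dma_alloc_attrs() and the 3.10-era struct dma_attrs helpers. A driver-style sketch, assuming the platform has installed dma_map_ops as above; my_alloc_streaming_buf() is illustrative, not code from this tree:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/*
 * Illustrative only. With DMA_ATTR_NON_CONSISTENT set, the allocation
 * is routed to mips_dma_alloc_noncoherent(): the buffer stays cached,
 * so the driver must flush it explicitly around device accesses.
 */
static void *my_alloc_streaming_buf(struct device *dev, size_t size,
	dma_addr_t *handle)
{
	struct dma_attrs attrs;
	void *buf;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);

	buf = dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
	if (!buf)
		return NULL;

	/* CPU writes are still in the cache; write them back for the device. */
	dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);
	return buf;
}

The matching release must pass the same attrs to dma_free_attrs() so that the free side takes the mips_dma_free_noncoherent() path rather than the CMA/page-allocator path.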