--- zzzz-none-000/linux-3.10.107/include/linux/dma-mapping.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/linux/dma-mapping.h	2021-02-04 17:41:59.000000000 +0000
@@ -1,6 +1,7 @@
 #ifndef _LINUX_DMA_MAPPING_H
 #define _LINUX_DMA_MAPPING_H
 
+#include <linux/sizes.h>
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -8,6 +9,12 @@
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 
+/*
+ * A dma_addr_t can hold any valid DMA or bus address for the platform.
+ * It can be given to a device to use as a DMA source or target. A CPU cannot
+ * reference a dma_addr_t directly because there may be translation between
+ * its physical address space and the bus address space.
+ */
 struct dma_map_ops {
 	void* (*alloc)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp,
@@ -28,6 +35,10 @@
 	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
 			   size_t size, enum dma_data_direction dir,
 			   struct dma_attrs *attrs);
+	/*
+	 * map_sg returns 0 on error and a value > 0 on success.
+	 * It should never return a value < 0.
+	 */
 	int (*map_sg)(struct device *dev, struct scatterlist *sg,
 		      int nents, enum dma_data_direction dir,
 		      struct dma_attrs *attrs);
@@ -97,11 +108,47 @@
 }
 #endif
 
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+	int rc = dma_set_mask(dev, mask);
+	if (rc == 0)
+		dma_set_coherent_mask(dev, mask);
+	return rc;
+}
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately setup.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+	dev->dma_mask = &dev->coherent_dma_mask;
+	return dma_set_mask_and_coherent(dev, mask);
+}
+
 extern u64 dma_get_required_mask(struct device *dev);
 
+#ifndef arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+				      u64 size, const struct iommu_ops *iommu,
+				      bool coherent) { }
+#endif
+
+#ifndef arch_teardown_dma_ops
+static inline void arch_teardown_dma_ops(struct device *dev) { }
+#endif
+
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
 {
-	return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
+	if (dev->dma_parms && dev->dma_parms->max_segment_size)
+		return dev->dma_parms->max_segment_size;
+	return SZ_64K;
 }
 
 static inline unsigned int dma_set_max_seg_size(struct device *dev,
@@ -110,14 +157,15 @@
 	if (dev->dma_parms) {
 		dev->dma_parms->max_segment_size = size;
 		return 0;
-	} else
-		return -EIO;
+	}
+	return -EIO;
 }
 
 static inline unsigned long dma_get_seg_boundary(struct device *dev)
 {
-	return dev->dma_parms ?
-		dev->dma_parms->segment_boundary_mask : 0xffffffff;
+	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
+		return dev->dma_parms->segment_boundary_mask;
+	return DMA_BIT_MASK(32);
 }
 
 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
@@ -125,16 +173,22 @@
 	if (dev->dma_parms) {
 		dev->dma_parms->segment_boundary_mask = mask;
 		return 0;
-	} else
-		return -EIO;
+	}
+	return -EIO;
+}
+
+#ifndef dma_max_pfn
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+	return *dev->dma_mask >> PAGE_SHIFT;
 }
+#endif
 
 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flag)
 {
-	void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
-	if (ret)
-		memset(ret, 0, size);
+	void *ret = dma_alloc_coherent(dev, size, dma_handle,
+				       flag | __GFP_ZERO);
 	return ret;
 }
 
@@ -156,7 +210,7 @@
 
 #ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
 static inline int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 			    dma_addr_t device_addr, size_t size, int flags)
 {
 	return 0;
@@ -187,13 +241,14 @@
 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 				  dma_addr_t dma_handle);
 #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+extern int dmam_declare_coherent_memory(struct device *dev,
+					 phys_addr_t phys_addr,
 					dma_addr_t device_addr, size_t size,
 					int flags);
 extern void dmam_release_declared_memory(struct device *dev);
 #else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
 static inline int dmam_declare_coherent_memory(struct device *dev,
-				dma_addr_t bus_addr, dma_addr_t device_addr,
+				phys_addr_t phys_addr, dma_addr_t device_addr,
 				size_t size, gfp_t gfp)
 {
 	return 0;
@@ -219,6 +274,32 @@
 #define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
 	dma_unmap_sg(dev, sgl, nents, dir)
 
+#else
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+					   dma_addr_t *dma_addr, gfp_t gfp)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+}
+
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+					 void *cpu_addr, dma_addr_t dma_addr)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+}
+
+static inline int dma_mmap_writecombine(struct device *dev,
+					struct vm_area_struct *vma,
+					void *cpu_addr, dma_addr_t dma_addr,
+					size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
 #endif /* CONFIG_HAVE_DMA_ATTRS */
 
 #ifdef CONFIG_NEED_DMA_MAP_STATE
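
Not part of the patch itself: below is a minimal usage sketch of the helpers this change adds or reworks, namely dma_set_mask_and_coherent() and the __GFP_ZERO-based dma_zalloc_coherent(). The probe function name, platform device and buffer size are hypothetical; only the DMA-API calls come from this header.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* Hypothetical probe-time DMA setup for a platform device. */
static int example_dma_setup(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	dma_addr_t ring_dma;
	void *ring;
	int ret;

	/*
	 * dma_set_mask_and_coherent() sets both dev->dma_mask and
	 * dev->coherent_dma_mask; fall back to 32-bit addressing if
	 * 64-bit DMA is not supported by the platform.
	 */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/*
	 * With this patch dma_zalloc_coherent() zeroes the buffer via
	 * __GFP_ZERO instead of an explicit memset().
	 */
	ring = dma_zalloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... hand ring_dma to the device and use the buffer ... */

	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
	return 0;
}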