--- zzzz-none-000/linux-3.10.107/arch/arm64/include/asm/dma-mapping.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/arm64/include/asm/dma-mapping.h	2021-02-04 17:41:59.000000000 +0000
@@ -21,58 +21,65 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
-#include <asm-generic/dma-coherent.h>
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
+extern struct dma_map_ops dummy_dma_ops;
 
-extern struct dma_map_ops *dma_ops;
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+{
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+
+	/*
+	 * We expect no ISA devices, and all other DMA masters are expected to
+	 * have someone call arch_setup_dma_ops at device creation time.
+	 */
+	return &dummy_dma_ops;
+}
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if (unlikely(!dev) || !dev->archdata.dma_ops)
-		return dma_ops;
+	if (xen_initial_domain())
+		return xen_dma_ops;
 	else
-		return dev->archdata.dma_ops;
+		return __generic_dma_ops(dev);
 }
 
-#include <asm-generic/dma-mapping-common.h>
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+			const struct iommu_ops *iommu, bool coherent);
+#define arch_setup_dma_ops	arch_setup_dma_ops
 
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	return (dma_addr_t)paddr;
-}
+#ifdef CONFIG_IOMMU_DMA
+void arch_teardown_dma_ops(struct device *dev);
+#define arch_teardown_dma_ops	arch_teardown_dma_ops
+#endif
 
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+/* do not use this function in a driver */
+static inline bool is_device_dma_coherent(struct device *dev)
 {
-	return (phys_addr_t)dev_addr;
+	if (!dev)
+		return false;
+	return dev->archdata.dma_coherent;
 }
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	debug_dma_mapping_error(dev, dev_addr);
-	return ops->mapping_error(dev, dev_addr);
-}
+#include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_supported(struct device *dev, u64 mask)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	return ops->dma_supported(dev, mask);
+	return (dma_addr_t)paddr;
}
 
-static inline int dma_set_mask(struct device *dev, u64 mask)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 {
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-
-	return 0;
+	return (phys_addr_t)dev_addr;
 }
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
-		return 0;
+		return false;
 
 	return addr + size - 1 <= *dev->dma_mask;
 }
@@ -81,45 +88,5 @@
 {
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flags)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	void *vaddr;
-
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
-		return vaddr;
-
-	vaddr = ops->alloc(dev, size, dma_handle, flags, NULL);
-	debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
-	return vaddr;
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dev_addr)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (dma_release_from_coherent(dev, get_order(size), vaddr))
-		return;
-
-	debug_dma_free_coherent(dev, size, vaddr, dev_addr);
-	ops->free(dev, size, vaddr, dev_addr, NULL);
-}
-
-/*
- * There is no dma_cache_sync() implementation, so just return NULL here.
- */
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
-					  dma_addr_t *handle, gfp_t flags)
-{
-	return NULL;
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
-					void *cpu_addr, dma_addr_t handle)
-{
-}
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_DMA_MAPPING_H */
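
For context, here is a minimal sketch of how the hooks this patch introduces fit together from the consumer side. It is illustrative only and not part of the patch: the foo_* names are hypothetical, and in practice the bus/firmware layers (OF, ACPI) make the arch_setup_dma_ops() call, not individual drivers.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

/*
 * Hypothetical bus code: install DMA ops when the device is created.
 * Until this runs, get_dma_ops() falls back to dummy_dma_ops, whose
 * operations all fail, so an unconfigured DMA master is caught early.
 */
static void foo_bus_add_device(struct device *dev, bool coherent)
{
	/* No IOMMU behind this device; 1 GiB window at bus address 0. */
	arch_setup_dma_ops(dev, 0, SZ_1G, NULL, coherent);
}

/* Hypothetical driver: plain DMA API calls, dispatched via get_dma_ops(). */
static int foo_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/*
	 * In a Xen initial domain this dispatches through xen_dma_ops;
	 * otherwise through whatever arch_setup_dma_ops() installed above.
	 */
	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the hardware, run the transfer ... */

	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}

The design shift worth noting: the old header kept one global dma_ops default, so a device nobody configured still got working (possibly wrong) ops, while the new code defaults to dummy_dma_ops and makes every mapping fail until arch_setup_dma_ops() has run, exactly as the comment in __generic_dma_ops() states.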