--- zzzz-none-000/linux-4.4.271/include/linux/dmaengine.h	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/include/linux/dmaengine.h	2023-04-19 10:22:30.000000000 +0000
@@ -646,6 +646,8 @@
  *	be called after period_len bytes have been transferred.
  * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_prep_dma_custom_mapping: prepares a dma operation from dma driver
+ *	specific custom data
  * @device_config: Pushes a new configuration to a channel, return 0 or an error
  *	code
  * @device_pause: Pauses any transfer happening on a channel. Returns
@@ -731,12 +733,16 @@
 	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
 		struct dma_chan *chan, dma_addr_t dst, u64 data,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_custom_mapping)(
+		struct dma_chan *chan, void *data,
+		unsigned long flags);
 
 	int (*device_config)(struct dma_chan *chan,
 			     struct dma_slave_config *config);
 	int (*device_pause)(struct dma_chan *chan);
 	int (*device_resume)(struct dma_chan *chan);
 	int (*device_terminate_all)(struct dma_chan *chan);
+	int (*device_terminate_all_graceful)(struct dma_chan *chan, bool graceful);
 
 	enum dma_status (*device_tx_status)(struct dma_chan *chan,
 					    dma_cookie_t cookie,
@@ -846,6 +852,15 @@
 				  src_sg, src_nents, flags);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_custom_mapping(
+		struct dma_chan *chan,
+		void *data,
+		unsigned long flags)
+{
+	return chan->device->device_prep_dma_custom_mapping(chan, data,
+			flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	if (chan->device->device_terminate_all)
@@ -853,6 +868,17 @@
 
 	return -ENOSYS;
 }
+
+static inline int dmaengine_terminate_all_graceful(struct dma_chan *chan, bool graceful)
+{
+	/* Guard on the graceful op itself: a driver that implements only
+	 * device_terminate_all must get -ENOSYS here, not a NULL call.
+	 */
+	if (chan->device->device_terminate_all_graceful)
+		return chan->device->device_terminate_all_graceful(chan, graceful);
+
+	return -ENOSYS;
+}
 
 static inline int dmaengine_pause(struct dma_chan *chan)
 {