--- zzzz-none-000/linux-3.10.107/include/linux/dmaengine.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/linux/dmaengine.h	2021-02-04 17:41:59.000000000 +0000
@@ -11,10 +11,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * The full GNU General Public License is included in this distribution in the
  * file called COPYING.
  */
@@ -22,6 +18,7 @@
 #define LINUX_DMAENGINE_H
 
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/uio.h>
 #include <linux/bug.h>
 #include <linux/scatterlist.h>
@@ -36,19 +33,21 @@
  */
 typedef s32 dma_cookie_t;
 #define DMA_MIN_COOKIE	1
-#define DMA_MAX_COOKIE	INT_MAX
 
-#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
+static inline int dma_submit_error(dma_cookie_t cookie)
+{
+	return cookie < 0 ? cookie : 0;
+}
 
 /**
  * enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
  * @DMA_IN_PROGRESS: transaction not yet processed
  * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
-	DMA_SUCCESS,
+	DMA_COMPLETE,
 	DMA_IN_PROGRESS,
 	DMA_PAUSED,
 	DMA_ERROR,
@@ -67,6 +66,7 @@
 	DMA_XOR_VAL,
 	DMA_PQ_VAL,
 	DMA_MEMSET,
+	DMA_MEMSET_SG,
 	DMA_INTERRUPT,
 	DMA_SG,
 	DMA_PRIVATE,
@@ -124,10 +124,18 @@
  *	 chunk and before first src/dst address for next chunk.
  *	 Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
  *	 Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ *	 chunk and before the first dst address for next chunk.
+ *	 Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ *	 chunk and before the first src address for next chunk.
+ *	 Ignored if src_inc is true and src_sgl is false.
  */
 struct data_chunk {
 	size_t size;
 	size_t icg;
+	size_t dst_icg;
+	size_t src_icg;
 };
 
 /**
@@ -169,12 +177,6 @@
  * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has a chance to establish any dependency
  *  chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- *	(if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- *	(if not set, do the destination dma-unmapping as page)
  * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
  * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
  * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -182,40 +184,17 @@
  *  operation it continues the calculation with new sources
  * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
  *  on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
+ *  cleared or freed
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
 	DMA_CTRL_ACK = (1 << 1),
-	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
-	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
-	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
-	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
-	DMA_PREP_PQ_DISABLE_P = (1 << 6),
-	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
-	DMA_PREP_CONTINUE = (1 << 8),
-	DMA_PREP_FENCE = (1 << 9),
-};
-
-/**
- * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
- * on a running channel.
- * @DMA_TERMINATE_ALL: terminate all ongoing transfers
- * @DMA_PAUSE: pause ongoing transfers
- * @DMA_RESUME: resume paused transfer
- * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
- * that need to runtime reconfigure the slave channels (as opposed to passing
- * configuration data in statically from the platform). An additional
- * argument of struct dma_slave_config must be passed in with this
- * command.
- * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
- * into external start mode.
- */
-enum dma_ctrl_cmd {
-	DMA_TERMINATE_ALL,
-	DMA_PAUSE,
-	DMA_RESUME,
-	DMA_SLAVE_CONFIG,
-	FSLDMA_EXTERNAL_START,
+	DMA_PREP_PQ_DISABLE_P = (1 << 2),
+	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+	DMA_PREP_CONTINUE = (1 << 4),
+	DMA_PREP_FENCE = (1 << 5),
+	DMA_CTRL_REUSE = (1 << 6),
 };
 
 /**
@@ -256,6 +235,16 @@
 };
 
 /**
+ * struct dma_router - DMA router structure
+ * @dev: pointer to the DMA router device
+ * @route_free: function to be called when the route can be disconnected
+ */
+struct dma_router {
+	struct device *dev;
+	void (*route_free)(struct device *dev, void *route_data);
+};
+
+/**
  * struct dma_chan - devices supply DMA channels, clients use them
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
@@ -264,8 +253,10 @@
  * @dev: class device for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
- * @client-count: how many clients are using this channel
+ * @client_count: how many clients are using this channel
  * @table_count: number of appearances in the mem-to-mem allocation table
+ * @router: pointer to the DMA router structure
+ * @route_data: channel specific data for the router
  * @private: private data for certain client-channel associations
  */
 struct dma_chan {
@@ -281,15 +272,20 @@
 	struct dma_chan_percpu __percpu *local;
 	int client_count;
 	int table_count;
+
+	/* DMA router */
+	struct dma_router *router;
+	void *route_data;
+
 	void *private;
 };
 
 /**
  * struct dma_chan_dev - relate sysfs device node to backing channel device
- * @chan - driver channel device
- * @device - sysfs device
- * @dev_id - parent dma_device dev_id
- * @idr_ref - reference count to gate release of dma_device dev_id
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
 	struct dma_chan *chan;
@@ -299,23 +295,28 @@
 };
 
 /**
- * enum dma_slave_buswidth - defines bus with of the DMA slave
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
  * device, source or target buses
  */
 enum dma_slave_buswidth {
 	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
 	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
 	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
 	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
 	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
 };
 
 /**
  * struct dma_slave_config - dma slave channel runtime config
  * @direction: whether the data shall go in or out on this slave
- * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
- * legal values, DMA_BIDIRECTIONAL is not acceptable since we
- * need to differentiate source and target addresses.
+ * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ * legal values. DEPRECATED, drivers should use the direction argument
+ * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
+ * the dir field in the dma_interleaved_template structure.
 * @src_addr: this is the physical address where DMA slave data
 * should be read (RX), if the source is memory this argument is
 * ignored.
@@ -345,19 +346,14 @@
 * This struct is passed in as configuration data to a DMA engine
 * in order to set up a certain channel for DMA transport at runtime.
 * The DMA device/engine has to provide support for an additional
- * command in the channel config interface, DMA_SLAVE_CONFIG
- * and this struct will then be passed in as an argument to the
- * DMA engine device_control() function.
+ * callback in the dma_device structure, device_config and this struct
+ * will then be passed in as an argument to the function.
 *
- * The rationale for adding configuration information to this struct
- * is as follows: if it is likely that most DMA slave controllers in
- * the world will support the configuration option, then make it
- * generic. If not: if it is fixed so that it be sent in static from
- * the platform data, then prefer to do that. Else, if it is neither
- * fixed at runtime, nor generic enough (such as bus mastership on
- * some CPU family and whatnot) then create a custom slave config
- * struct and pass that, then make this config a member of that
- * struct, if applicable.
+ * The rationale for adding configuration information to this struct is as
+ * follows: if it is likely that more than one DMA slave controller in
+ * the world will support the configuration option, then make it generic.
+ * If not: if it is fixed so that it can be sent in statically from the
+ * platform data, then prefer to do that.
 */
 struct dma_slave_config {
 	enum dma_transfer_direction direction;
@@ -371,6 +367,56 @@
 	unsigned int slave_id;
 };
 
+/**
+ * enum dma_residue_granularity - Granularity of the reported transfer residue
+ * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
+ *  DMA channel is only able to tell whether a descriptor has been completed or
+ *  not, which means residue reporting is not supported by this channel. The
+ *  residue field of the dma_tx_state field will always be 0.
+ * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
+ *  completed segment of the transfer (For cyclic transfers this is after each
+ *  period). This is typically implemented by having the hardware generate an
+ *  interrupt after each transferred segment and then the driver updates the
+ *  outstanding residue by the size of the segment. Another possibility is if
+ *  the hardware supports scatter-gather and the segment descriptor has a field
+ *  which gets set after the segment has been completed. The driver then counts
+ *  the number of segments without the flag set to compute the residue.
+ * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
+ *  burst. This is typically only supported if the hardware has a progress
+ *  register of some sort (E.g. a register with the current read/write address
+ *  or a register with the amount of bursts/beats/bytes that have been
+ *  transferred or still need to be transferred).
+ */
+enum dma_residue_granularity {
+	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+	DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
+
+/* struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of src addr widths the channel supports
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports;
+ *	since enum dma_transfer_direction is not defined as bits for each
+ *	type of direction, the dma controller should fill (1 << <TRANSFER_TYPE>)
+ *	and the same should be checked by the controller as well
+ * @cmd_pause: true, if pause and thereby resume is supported
+ * @cmd_terminate: true, if terminate cmd is supported
+ * @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: if a descriptor can be reused by client and
+ *	resubmitted multiple times
+ */
+struct dma_slave_caps {
+	u32 src_addr_widths;
+	u32 dst_addr_widths;
+	u32 directions;
+	bool cmd_pause;
+	bool cmd_terminate;
+	enum dma_residue_granularity residue_granularity;
+	bool descriptor_reuse;
+};
+
 static inline const char *dma_chan_name(struct dma_chan *chan)
 {
 	return dev_name(&chan->dev->device);
@@ -392,6 +438,18 @@
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+	u8 map_cnt;
+	u8 to_cnt;
+	u8 from_cnt;
+	u8 bidi_cnt;
+	struct device *dev;
+	struct kref kref;
+	size_t len;
+	dma_addr_t addr[0];
+};
+
 /**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
@@ -401,7 +459,8 @@
 *	communicate status
 * @phys: physical address of the descriptor
 * @chan: target channel for this operation
- * @tx_submit: set the prepared descriptor(s) to be executed by the engine
+ * @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ *	descriptor pending. To be pushed on .issue_pending() call
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
@@ -415,8 +474,10 @@
 	dma_addr_t phys;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+	int (*desc_free)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+	struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
@@ -424,6 +485,40 @@
 #endif
 };
 
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+				 struct dmaengine_unmap_data *unmap)
+{
+	kref_get(&unmap->kref);
+	tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+				 struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+	return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+	if (tx->unmap) {
+		dmaengine_unmap_put(tx->unmap);
+		tx->unmap = NULL;
+	}
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
@@ -498,6 +593,20 @@
 };
 
 /**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+	DMAENGINE_ALIGN_1_BYTE = 0,
+	DMAENGINE_ALIGN_2_BYTES = 1,
+	DMAENGINE_ALIGN_4_BYTES = 2,
+	DMAENGINE_ALIGN_8_BYTES = 3,
+	DMAENGINE_ALIGN_16_BYTES = 4,
+	DMAENGINE_ALIGN_32_BYTES = 5,
+	DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -512,6 +621,14 @@
 * @fill_align: alignment shift for memset operations
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave directions the device supports;
+ *	since enum dma_transfer_direction is not defined as bits for
+ *	each type of direction, the dma controller should fill (1 <<
+ *	<TRANSFER_TYPE>) and the same should be checked by the controller
+ *	as well
+ * @residue_granularity: granularity of the transfer residue reported
+ *	by tx_status
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
@@ -521,14 +638,24 @@
 * @device_prep_dma_pq: prepares a pq operation
 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_prep_slave_sg: prepares a slave dma operation
 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
 *	The function takes a buffer of size buf_len. The callback function will
 *	be called after period_len bytes have been transferred.
 * @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_control: manipulate all pending operations on a channel, returns
- *	zero or error code
+ * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_prep_dma_custom_mapping: prepares a dma operation from dma driver
+ *	specific custom data
+ * @device_config: Pushes a new configuration to a channel, return 0 or an error
+ *	code
+ * @device_pause: Pauses any transfer happening on a channel. Returns
+ *	0 or an error code
+ * @device_resume: Resumes any transfer on a channel previously
+ *	paused. Returns 0 or an error code
+ * @device_terminate_all: Aborts all transfers on a channel. Returns 0
+ *	or an error code
 * @device_tx_status: poll for transaction completion, the optional
 *	txstate parameter can be supplied with a pointer to get a
 *	struct with auxiliary transfer status information, otherwise the call
@@ -544,23 +671,28 @@
 	dma_cap_mask_t cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
-	u8 copy_align;
-	u8 xor_align;
-	u8 pq_align;
-	u8 fill_align;
+	enum dmaengine_alignment copy_align;
+	enum dmaengine_alignment xor_align;
+	enum dmaengine_alignment pq_align;
+	enum dmaengine_alignment fill_align;
 	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
 	struct device *dev;
 
+	u32 src_addr_widths;
+	u32 dst_addr_widths;
+	u32 directions;
+	enum dma_residue_granularity residue_granularity;
+
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
-		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 		size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
-		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 		unsigned int src_cnt, size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
 		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
@@ -576,6 +708,9 @@
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+		struct dma_chan *chan, struct scatterlist *sg,
+		unsigned int nents, int value, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -591,12 +726,23 @@
 	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context);
+		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
 		struct dma_chan *chan, struct dma_interleaved_template *xt,
 		unsigned long flags);
-	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg);
+	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
+		struct dma_chan *chan, dma_addr_t dst, u64 data,
+		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_custom_mapping)(
+		struct dma_chan *chan, void *data,
+		unsigned long flags);
+
+	int (*device_config)(struct dma_chan *chan,
+			     struct dma_slave_config *config);
+	int (*device_pause)(struct dma_chan *chan);
+	int (*device_resume)(struct dma_chan *chan);
+	int (*device_terminate_all)(struct dma_chan *chan);
+	int (*device_terminate_all_graceful)(struct dma_chan *chan, bool graceful);
 
 	enum dma_status (*device_tx_status)(struct dma_chan *chan,
 					    dma_cookie_t cookie,
@@ -604,21 +750,13 @@
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
-static inline int dmaengine_device_control(struct dma_chan *chan,
-					   enum dma_ctrl_cmd cmd,
-					   unsigned long arg)
-{
-	if (chan->device->device_control)
-		return chan->device->device_control(chan, cmd, arg);
-
-	return -ENOSYS;
-}
-
 static inline int dmaengine_slave_config(struct dma_chan *chan,
 					 struct dma_slave_config *config)
 {
-	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
-			(unsigned long)config);
+	if (chan->device->device_config)
+		return chan->device->device_config(chan, config);
+
+	return -ENOSYS;
 }
 
 static inline bool is_slave_direction(enum dma_transfer_direction direction)
@@ -665,7 +803,7 @@
 		unsigned long flags)
 {
 	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
-						period_len, dir, flags, NULL);
+						period_len, dir, flags);
 }
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
@@ -675,19 +813,66 @@
 	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
+		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+		unsigned long flags)
+{
+	if (!chan || !chan->device)
+		return NULL;
+
+	return chan->device->device_prep_dma_memset(chan, dest, value,
+						    len, flags);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
+		struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		unsigned long flags)
+{
+	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
+			src_sg, src_nents, flags);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_custom_mapping(
+		struct dma_chan *chan,
+		void *data,
+		unsigned long flags)
+{
+	return chan->device->device_prep_dma_custom_mapping(chan, data,
+			flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	if (chan->device->device_terminate_all)
+		return chan->device->device_terminate_all(chan);
+
+	return -ENOSYS;
+}
+
+static inline int dmaengine_terminate_all_graceful(struct dma_chan *chan, bool graceful)
+{
+	if (chan->device->device_terminate_all_graceful)
+		return chan->device->device_terminate_all_graceful(chan, graceful);
+
+	return -ENOSYS;
 }
 
 static inline int dmaengine_pause(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_PAUSE, 0);
+	if (chan->device->device_pause)
+		return chan->device->device_pause(chan);
+
+	return -ENOSYS;
 }
 
 static inline int dmaengine_resume(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_RESUME, 0);
+	if (chan->device->device_resume)
+		return chan->device->device_resume(chan);
+
+	return -ENOSYS;
 }
 
 static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
@@ -701,7 +886,8 @@
 	return desc->tx_submit(desc);
 }
 
-static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+					 size_t off1, size_t off2, size_t len)
 {
 	size_t mask;
 
@@ -791,6 +977,33 @@
 	BUG();
 }
 
+static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
+				       size_t dir_icg)
+{
+	if (inc) {
+		if (dir_icg)
+			return dir_icg;
+		else if (sgl)
+			return icg;
+	}
+
+	return 0;
+}
+
+static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
+					   struct data_chunk *chunk)
+{
+	return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
+				 chunk->icg, chunk->dst_icg);
+}
+
+static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
+					   struct data_chunk *chunk)
+{
+	return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
+				 chunk->icg, chunk->src_icg);
+}
+
 /* --- public DMA engine API --- */
 
 #ifdef CONFIG_DMA_ENGINE
@@ -805,18 +1018,6 @@
 }
 #endif
 
-#ifdef CONFIG_NET_DMA
-#define net_dmaengine_get()	dmaengine_get()
-#define net_dmaengine_put()	dmaengine_put()
-#else
-static inline void net_dmaengine_get(void)
-{
-}
-static inline void net_dmaengine_put(void)
-{
-}
-#endif
-
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()	dmaengine_get()
 #define async_dmaengine_put()	dmaengine_put()
@@ -838,16 +1039,8 @@
 	return NULL;
 }
 #endif /* CONFIG_ASYNC_TX_DMA */
-
-dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
-	void *dest, void *src, size_t len);
-dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
-	struct page *page, unsigned int offset, void *kdata, size_t len);
-dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
-	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
-	unsigned int src_off, size_t len);
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
-	struct dma_chan *chan);
+				  struct dma_chan *chan);
 
 static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
 {
@@ -945,10 +1138,10 @@
 {
 	if (last_complete <= last_used) {
 		if ((cookie <= last_complete) || (cookie > last_used))
-			return DMA_SUCCESS;
+			return DMA_COMPLETE;
 	} else {
 		if ((cookie <= last_complete) && (cookie > last_used))
-			return DMA_SUCCESS;
+			return DMA_COMPLETE;
 	}
 	return DMA_IN_PROGRESS;
 }
@@ -963,18 +1156,30 @@
 	}
 }
 
-enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
					dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
+						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+	return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+	return DMA_COMPLETE;
+}
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-	return DMA_SUCCESS;
+	return DMA_COMPLETE;
 }
 static inline void dma_issue_pending_all(void)
 {
@@ -984,6 +1189,11 @@
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_slave_channel_reason(
+					struct device *dev, const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
							  const char *name)
 {
@@ -992,15 +1202,56 @@
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
+static inline int dma_get_slave_caps(struct dma_chan *chan,
+				     struct dma_slave_caps *caps)
+{
+	return -ENXIO;
+}
 #endif
 
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_slave_caps caps;
+	int ret;
+
+	ret = dma_get_slave_caps(tx->chan, &caps);
+	if (ret)
+		return ret;
+
+	if (caps.descriptor_reuse) {
+		tx->flags |= DMA_CTRL_REUSE;
+		return 0;
+	} else {
+		return -EPERM;
+	}
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+	tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+	/* this is supported for reusable desc, so check that */
+	if (dmaengine_desc_test_reuse(desc))
+		return desc->desc_free(desc);
+	else
+		return -EPERM;
+}
+
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
-struct dma_chan *net_dma_find_channel(void);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
@@ -1008,7 +1256,7 @@
 
 static inline struct dma_chan
 *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
				  dma_filter_fn fn, void *fn_param,
-				  struct device *dev, char *name)
+				  struct device *dev, const char *name)
 {
 	struct dma_chan *chan;
 
@@ -1016,29 +1264,9 @@
 	if (chan)
 		return chan;
 
+	if (!fn || !fn_param)
+		return NULL;
+
 	return __dma_request_channel(mask, fn, fn_param);
 }
-
-/* --- Helper iov-locking functions --- */
-
-struct dma_page_list {
-	char __user *base_address;
-	int nr_pages;
-	struct page **pages;
-};
-
-struct dma_pinned_list {
-	int nr_iovecs;
-	struct dma_page_list page_list[0];
-};
-
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
-void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
-
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
-	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
-	struct dma_pinned_list *pinned_list, struct page *page,
-	unsigned int offset, size_t len);
-
 #endif /* DMAENGINE_H */
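
For context on the control-path change above (device_control() split into device_config/device_pause/device_resume/device_terminate_all), a minimal slave-DMA client against the new interface could look like the sketch below. The channel name "rx", the FIFO address, and the timeout are hypothetical, and includes/error handling are trimmed to the essentials.

static void xfer_done(void *param)
{
	complete(param);	/* wake the thread waiting below */
}

static int mydev_do_rx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0x10000000,	/* hypothetical device FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cookie_t cookie;
	int ret;

	/* unlike dma_request_slave_channel(), returns ERR_PTR() on failure */
	chan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);	/* -> device_config() */
	if (ret)
		goto out;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -ENOMEM;
		goto out;
	}
	desc->callback = xfer_done;
	desc->callback_param = &done;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	dma_async_issue_pending(chan);
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000))) {
		dmaengine_terminate_all(chan);	/* -> device_terminate_all() */
		ret = -ETIMEDOUT;
	}
out:
	dma_release_channel(chan);
	return ret;
}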
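The DMA_CTRL_REUSE flag and the desc_free hook added above allow one prepared descriptor to be submitted several times. A hedged sketch of the intended client flow, assuming the channel reports descriptor_reuse in its dma_slave_caps and that completion is awaited between submissions:

static int resubmit_same_desc(struct dma_chan *chan,
			      struct dma_async_tx_descriptor *desc, int times)
{
	dma_cookie_t cookie;
	int i, ret;

	/* refused with -EPERM unless the channel advertises descriptor_reuse */
	ret = dmaengine_desc_set_reuse(desc);
	if (ret)
		return ret;

	for (i = 0; i < times; i++) {
		cookie = dmaengine_submit(desc);	/* same desc, fresh cookie */
		ret = dma_submit_error(cookie);
		if (ret)
			break;
		dma_async_issue_pending(chan);
		/* ... wait for the completion callback before resubmitting ... */
	}

	/* explicit freeing is only permitted while the reuse flag is set;
	 * alternatively, clear the flag and let normal completion free it */
	if (dmaengine_desc_free(desc))
		ret = ret ? ret : -EPERM;
	return ret;
}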
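dma_slave_caps and the residue-granularity levels documented above are meant to be queried at runtime: state.residue from dmaengine_tx_status() is only meaningful when the channel reports better than DESCRIPTOR granularity. A small sketch (the cookie's origin and the pr_info() reporting are assumed):

static void inspect_channel(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_slave_caps caps;
	struct dma_tx_state state;
	enum dma_status status;

	if (dma_get_slave_caps(chan, &caps))
		return;		/* e.g. -ENXIO without CONFIG_DMA_ENGINE */

	/* direction bits are (1 << DMA_MEM_TO_DEV), (1 << DMA_DEV_TO_MEM), ... */
	if (caps.directions & BIT(DMA_DEV_TO_MEM))
		pr_info("%s: supports dev-to-mem\n", dma_chan_name(chan));
	if (caps.cmd_pause)
		pr_info("%s: pause/resume supported\n", dma_chan_name(chan));

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS &&
	    caps.residue_granularity != DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
		pr_info("%u bytes still outstanding\n", state.residue);
}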
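The new dst_icg/src_icg fields in struct data_chunk, together with the dmaengine_get_dst_icg()/dmaengine_get_src_icg() helpers, let a single template carry different inter-chunk gaps for each side of an interleaved transfer. A sketch of a 2D copy with packed source lines and a padded destination stride (function and parameter names are illustrative):

static struct dma_async_tx_descriptor *prep_2d_copy(struct dma_chan *chan,
		dma_addr_t dst, dma_addr_t src, size_t width, size_t lines,
		size_t dst_stride)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	/* one data_chunk trails the template (sgl[] flexible array) */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_MEM_TO_MEM;
	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = lines;		/* one frame per line */
	xt->frame_size = 1;		/* one chunk per frame */
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = false;		/* source lines are packed */
	xt->dst_sgl = true;		/* destination has a per-line gap */
	xt->sgl[0].size = width;
	xt->sgl[0].icg = 0;
	xt->sgl[0].dst_icg = dst_stride - width;	/* per-side gap */
	xt->sgl[0].src_icg = 0;

	/* a driver would recover the per-side gaps with the new helpers: */
	WARN_ON(dmaengine_get_dst_icg(xt, &xt->sgl[0]) != dst_stride - width);
	WARN_ON(dmaengine_get_src_icg(xt, &xt->sgl[0]) != 0);

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
	kfree(xt);	/* the driver copies what it needs during prep */
	return desc;
}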