--- zzzz-none-000/linux-3.10.107/drivers/media/v4l2-core/v4l2-mem2mem.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/media/v4l2-core/v4l2-mem2mem.c	2021-02-04 17:41:59.000000000 +0000
@@ -17,7 +17,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
 #include <media/v4l2-mem2mem.h>
 #include <media/v4l2-dev.h>
 #include <media/v4l2-fh.h>
@@ -41,6 +41,8 @@
 #define TRANS_QUEUED		(1 << 0)
 /* Instance is currently running in hardware */
 #define TRANS_RUNNING		(1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT		(1 << 2)
 
 
 /* Offset base for buffers on the destination queue - used to distinguish
@@ -95,7 +97,7 @@
  */
 void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
 {
-	struct v4l2_m2m_buffer *b = NULL;
+	struct v4l2_m2m_buffer *b;
 	unsigned long flags;
 
 	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
@@ -117,7 +119,7 @@
  */
 void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
 {
-	struct v4l2_m2m_buffer *b = NULL;
+	struct v4l2_m2m_buffer *b;
 	unsigned long flags;
 
 	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
@@ -196,13 +198,17 @@
  * 2) at least one destination buffer has to be queued,
  * 3) streaming has to be on.
  *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
  * There may also be additional, custom requirements. In such case the driver
  * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
  * return 1 if the instance is ready.
  * An example of the above could be an instance that requires more than one
  * src/dst buffer per transaction.
  */
-static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 {
 	struct v4l2_m2m_dev *m2m_dev;
 	unsigned long flags_job, flags_out, flags_cap;
@@ -217,6 +223,14 @@
 	}
 
 	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+
+	/* If the context is aborted then don't schedule it */
+	if (m2m_ctx->job_flags & TRANS_ABORT) {
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+		dprintk("Aborted context\n");
+		return;
+	}
+
 	if (m2m_ctx->job_flags & TRANS_QUEUED) {
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 		dprintk("On job queue already\n");
@@ -224,7 +238,8 @@
 	}
 
 	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
-	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+	    && !m2m_ctx->out_q_ctx.buffered) {
 		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
 					flags_out);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
@@ -232,7 +247,8 @@
 		return;
 	}
 	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+	    && !m2m_ctx->cap_q_ctx.buffered) {
 		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
 					flags_cap);
 		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
@@ -258,6 +274,42 @@
 
 	v4l2_m2m_try_run(m2m_dev);
 }
+EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
+
+/**
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ *
+ * In case of streamoff or release called on any context,
+ * 1] If the context is currently running, then abort job will be called
+ * 2] If the context is queued, then the context will be removed from
+ *    the job_queue
+ */
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
+{
+	struct v4l2_m2m_dev *m2m_dev;
+	unsigned long flags;
+
+	m2m_dev = m2m_ctx->m2m_dev;
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+
+	m2m_ctx->job_flags |= TRANS_ABORT;
+	if (m2m_ctx->job_flags & TRANS_RUNNING) {
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+		wait_event(m2m_ctx->finished,
+			   !(m2m_ctx->job_flags & TRANS_RUNNING));
+	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+		list_del(&m2m_ctx->queue);
+		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+		dprintk("m2m_ctx: %p had been on queue and was removed\n",
+			m2m_ctx);
+	} else {
+		/* Do nothing, was not on queue/running */
+		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+	}
+}
 
 /**
  * v4l2_m2m_job_finish() - inform the framework that a job has been finished
@@ -305,9 +357,16 @@
 	struct v4l2_requestbuffers *reqbufs)
 {
 	struct vb2_queue *vq;
+	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
-	return vb2_reqbufs(vq, reqbufs);
+	ret = vb2_reqbufs(vq, reqbufs);
+	/* If count == 0, then the owner has released all buffers and he
+	   is no longer owner of the queue. Otherwise we have an owner. */
+	if (ret == 0)
+		vq->owner = reqbufs->count ? file->private_data : NULL;
+
+	return ret;
 }
 
 EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
@@ -375,6 +434,25 @@
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 
 /**
+ * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+			 struct v4l2_buffer *buf)
+{
+	struct vb2_queue *vq;
+	int ret;
+
+	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+	ret = vb2_prepare_buf(vq, buf);
+	if (!ret)
+		v4l2_m2m_try_schedule(m2m_ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
+
+/**
  * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
  * on the type
  */
@@ -430,6 +508,9 @@
 	unsigned long flags_job, flags;
 	int ret;
 
+	/* wait until the current context is dequeued from job_queue */
+	v4l2_m2m_cancel_job(m2m_ctx);
+
 	q_ctx = get_queue_ctx(m2m_ctx, type);
 	ret = vb2_streamoff(&q_ctx->q, type);
 	if (ret)
@@ -438,13 +519,15 @@
 	m2m_dev = m2m_ctx->m2m_dev;
 	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
 	/* We should not be scheduled anymore, since we're dropping a queue. */
-	INIT_LIST_HEAD(&m2m_ctx->queue);
+	if (m2m_ctx->job_flags & TRANS_QUEUED)
+		list_del(&m2m_ctx->queue);
 	m2m_ctx->job_flags = 0;
 
 	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 	/* Drop queue, since streamoff returns device to the same state as after
 	 * calling reqbufs. */
 	INIT_LIST_HEAD(&q_ctx->rdy_queue);
+	q_ctx->num_rdy = 0;
 	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 
 	if (m2m_dev->curr_ctx == m2m_ctx) {
@@ -500,16 +583,25 @@
 		goto end;
 	}
 
-	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
-		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
-
+	spin_lock_irqsave(&src_q->done_lock, flags);
 	if (list_empty(&src_q->done_list))
 		poll_wait(file, &src_q->done_wq, wait);
-	if (list_empty(&dst_q->done_list))
-		poll_wait(file, &dst_q->done_wq, wait);
+	spin_unlock_irqrestore(&src_q->done_lock, flags);
 
-	if (m2m_ctx->m2m_dev->m2m_ops->lock)
-		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+	spin_lock_irqsave(&dst_q->done_lock, flags);
+	if (list_empty(&dst_q->done_list)) {
+		/*
+		 * If the last buffer was dequeued from the capture queue,
+		 * return immediately. DQBUF will return -EPIPE.
+		 */
+		if (dst_q->last_buffer_dequeued) {
+			spin_unlock_irqrestore(&dst_q->done_lock, flags);
+			return rc | POLLIN | POLLRDNORM;
+		}
+
+		poll_wait(file, &dst_q->done_wq, wait);
+	}
+	spin_unlock_irqrestore(&dst_q->done_lock, flags);
 
 	spin_lock_irqsave(&src_q->done_lock, flags);
 	if (!list_empty(&src_q->done_list))
@@ -637,6 +729,13 @@
 	if (ret)
 		goto err;
 
+	/*
+	 * If both queues use same mutex assign it as the common buffer
+	 * queues lock to the m2m context. This lock is used in the
+	 * v4l2_m2m_ioctl_* helpers.
+	 */
+	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
+		m2m_ctx->q_lock = out_q_ctx->q.lock;
 	return m2m_ctx;
 err:
 	kfree(m2m_ctx);
@@ -652,27 +751,8 @@
  */
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
 {
-	struct v4l2_m2m_dev *m2m_dev;
-	unsigned long flags;
-
-	m2m_dev = m2m_ctx->m2m_dev;
-
-	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
-	if (m2m_ctx->job_flags & TRANS_RUNNING) {
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
-		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
-		wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
-	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
-		list_del(&m2m_ctx->queue);
-		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-		dprintk("m2m_ctx: %p had been on queue and was removed\n",
-			m2m_ctx);
-	} else {
-		/* Do nothing, was not on queue/running */
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-	}
+	/* wait until the current context is dequeued from job_queue */
+	v4l2_m2m_cancel_job(m2m_ctx);
 
 	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
 	vb2_queue_release(&m2m_ctx->out_q_ctx.q);
@@ -686,13 +766,15 @@
  *
  * Call from buf_queue(), videobuf_queue_ops callback.
  */
-void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+			struct vb2_v4l2_buffer *vbuf)
 {
-	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
+	struct v4l2_m2m_buffer *b = container_of(vbuf,
+				struct v4l2_m2m_buffer, vb);
 	struct v4l2_m2m_queue_ctx *q_ctx;
 	unsigned long flags;
 
-	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
+	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
 	if (!q_ctx)
 		return;
 
@@ -703,3 +785,117 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+				struct v4l2_requestbuffers *rb)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+				struct v4l2_create_buffers *create)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
+				struct v4l2_buffer *buf)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+				struct v4l2_exportbuffer *eb)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+				enum v4l2_buf_type type)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+				enum v4l2_buf_type type)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here same lock is used
+ * for the output and the capture buffer queue.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+	struct v4l2_fh *fh = file->private_data;
+	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+	unsigned int ret;
+
+	if (m2m_ctx->q_lock)
+		mutex_lock(m2m_ctx->q_lock);
+
+	ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+	if (m2m_ctx->q_lock)
+		mutex_unlock(m2m_ctx->q_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
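
Note on using the new exports (annotation, not part of the diff above): taken
together, the v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers let a mem2mem driver
delegate its buffer handling entirely to the framework, provided that
file->private_data points to a struct v4l2_fh whose m2m_ctx is initialized,
and that both vb2 queues share one mutex so v4l2_m2m_ctx_init() records it as
m2m_ctx->q_lock for v4l2_m2m_fop_poll(). A minimal sketch of the driver-side
wiring follows; the my_* names and the dev_mutex field are illustrative
placeholders, not part of this patch.

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

struct my_dev {
	struct mutex dev_mutex;	/* shared by both vb2 queues */
};

struct my_ctx {
	struct v4l2_fh fh;	/* open() must set file->private_data = &ctx->fh */
	struct my_dev *dev;
};

/* Buffer ioctls are forwarded straight to the m2m helpers. */
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	/* the driver's format ioctls are elided */
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
};

/* queue_init callback handed to v4l2_m2m_ctx_init() from open() */
static int my_queue_init(void *priv, struct vb2_queue *src_vq,
			 struct vb2_queue *dst_vq)
{
	struct my_ctx *ctx = priv;
	int ret;

	/* type/io_modes/ops/mem_ops setup elided */

	/*
	 * The same mutex on both queues makes v4l2_m2m_ctx_init() set
	 * m2m_ctx->q_lock, which v4l2_m2m_fop_poll() then takes around
	 * v4l2_m2m_poll().
	 */
	src_vq->lock = &ctx->dev->dev_mutex;
	dst_vq->lock = &ctx->dev->dev_mutex;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;
	return vb2_queue_init(dst_vq);
}

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	/* open/release elided: open() allocates my_ctx, calls v4l2_m2m_ctx_init() */
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};

Two further points. q_lock is only set when both queues share a mutex, so a
driver with per-queue locks must keep providing its own poll(). And with the
scheduler changes, a driver whose hardware buffers bitstream internally can
mark a queue as buffered (via the matching v4l2-mem2mem.h change, not shown in
this diff), so v4l2_m2m_try_schedule() no longer insists on a ready buffer for
that queue, while TRANS_ABORT keeps an aborting context from being rescheduled
while v4l2_m2m_cancel_job() waits for the hardware to finish.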