--- zzzz-none-000/linux-3.10.107/drivers/media/v4l2-core/videobuf2-vmalloc.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/media/v4l2-core/videobuf2-vmalloc.c 2021-02-04 17:41:59.000000000 +0000 @@ -17,17 +17,15 @@ #include #include -#include +#include #include #include struct vb2_vmalloc_buf { void *vaddr; - struct page **pages; - struct vm_area_struct *vma; - int write; + struct frame_vector *vec; + enum dma_data_direction dma_dir; unsigned long size; - unsigned int n_pages; atomic_t refcount; struct vb2_vmarea_handler handler; struct dma_buf *dbuf; @@ -35,7 +33,8 @@ static void vb2_vmalloc_put(void *buf_priv); -static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags) +static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, + enum dma_data_direction dma_dir, gfp_t gfp_flags) { struct vb2_vmalloc_buf *buf; @@ -45,6 +44,7 @@ buf->size = size; buf->vaddr = vmalloc_user(buf->size); + buf->dma_dir = dma_dir; buf->handler.refcount = &buf->refcount; buf->handler.put = vb2_vmalloc_put; buf->handler.arg = buf; @@ -70,65 +70,50 @@ } static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, - unsigned long size, int write) + unsigned long size, + enum dma_data_direction dma_dir) { struct vb2_vmalloc_buf *buf; - unsigned long first, last; - int n_pages, offset; - struct vm_area_struct *vma; - dma_addr_t physp; + struct frame_vector *vec; + int n_pages, offset, i; buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return NULL; - buf->write = write; + buf->dma_dir = dma_dir; offset = vaddr & ~PAGE_MASK; buf->size = size; - - - vma = find_vma(current->mm, vaddr); - if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) { - if (vb2_get_contig_userptr(vaddr, size, &vma, &physp)) - goto fail_pages_array_alloc; - buf->vma = vma; - buf->vaddr = ioremap_nocache(physp, size); - if (!buf->vaddr) - goto fail_pages_array_alloc; + vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE); + if (IS_ERR(vec)) + goto fail_pfnvec_create; + buf->vec = vec; + n_pages = frame_vector_count(vec); + if (frame_vector_to_pages(vec) < 0) { + unsigned long *nums = frame_vector_pfns(vec); + + /* + * We cannot get page pointers for these pfns. Check memory is + * physically contiguous and use direct mapping. 
+ */ + for (i = 1; i < n_pages; i++) + if (nums[i-1] + 1 != nums[i]) + goto fail_map; + buf->vaddr = (__force void *) + ioremap_nocache(nums[0] << PAGE_SHIFT, size); } else { - first = vaddr >> PAGE_SHIFT; - last = (vaddr + size - 1) >> PAGE_SHIFT; - buf->n_pages = last - first + 1; - buf->pages = kzalloc(buf->n_pages * sizeof(struct page *), - GFP_KERNEL); - if (!buf->pages) - goto fail_pages_array_alloc; - - /* current->mm->mmap_sem is taken by videobuf2 core */ - n_pages = get_user_pages(current, current->mm, - vaddr & PAGE_MASK, buf->n_pages, - write, 1, /* force */ - buf->pages, NULL); - if (n_pages != buf->n_pages) - goto fail_get_user_pages; - - buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1, + buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1, PAGE_KERNEL); - if (!buf->vaddr) - goto fail_get_user_pages; } + if (!buf->vaddr) + goto fail_map; buf->vaddr += offset; return buf; -fail_get_user_pages: - pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages, - buf->n_pages); - while (--n_pages >= 0) - put_page(buf->pages[n_pages]); - kfree(buf->pages); - -fail_pages_array_alloc: +fail_map: + vb2_destroy_framevec(vec); +fail_pfnvec_create: kfree(buf); return NULL; @@ -139,21 +124,21 @@ struct vb2_vmalloc_buf *buf = buf_priv; unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; unsigned int i; + struct page **pages; + unsigned int n_pages; - if (buf->pages) { + if (!buf->vec->is_pfns) { + n_pages = frame_vector_count(buf->vec); + pages = frame_vector_pages(buf->vec); if (vaddr) - vm_unmap_ram((void *)vaddr, buf->n_pages); - for (i = 0; i < buf->n_pages; ++i) { - if (buf->write) - set_page_dirty_lock(buf->pages[i]); - put_page(buf->pages[i]); - } - kfree(buf->pages); + vm_unmap_ram((void *)vaddr, n_pages); + if (buf->dma_dir == DMA_FROM_DEVICE) + for (i = 0; i < n_pages; i++) + set_page_dirty_lock(pages[i]); } else { - if (buf->vma) - vb2_put_vma(buf->vma); - iounmap(buf->vaddr); + iounmap((__force void __iomem *)buf->vaddr); } + vb2_destroy_framevec(buf->vec); kfree(buf); } @@ -208,6 +193,185 @@ return 0; } +#ifdef CONFIG_HAS_DMA +/*********************************************/ +/* DMABUF ops for exporters */ +/*********************************************/ + +struct vb2_vmalloc_attachment { + struct sg_table sgt; + enum dma_data_direction dma_dir; +}; + +static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, + struct dma_buf_attachment *dbuf_attach) +{ + struct vb2_vmalloc_attachment *attach; + struct vb2_vmalloc_buf *buf = dbuf->priv; + int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE; + struct sg_table *sgt; + struct scatterlist *sg; + void *vaddr = buf->vaddr; + int ret; + int i; + + attach = kzalloc(sizeof(*attach), GFP_KERNEL); + if (!attach) + return -ENOMEM; + + sgt = &attach->sgt; + ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL); + if (ret) { + kfree(attach); + return ret; + } + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + struct page *page = vmalloc_to_page(vaddr); + + if (!page) { + sg_free_table(sgt); + kfree(attach); + return -ENOMEM; + } + sg_set_page(sg, page, PAGE_SIZE, 0); + vaddr += PAGE_SIZE; + } + + attach->dma_dir = DMA_NONE; + dbuf_attach->priv = attach; + return 0; +} + +static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf, + struct dma_buf_attachment *db_attach) +{ + struct vb2_vmalloc_attachment *attach = db_attach->priv; + struct sg_table *sgt; + + if (!attach) + return; + + sgt = &attach->sgt; + + /* release the scatterlist cache */ + if (attach->dma_dir != DMA_NONE) + 
dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, + attach->dma_dir); + sg_free_table(sgt); + kfree(attach); + db_attach->priv = NULL; +} + +static struct sg_table *vb2_vmalloc_dmabuf_ops_map( + struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) +{ + struct vb2_vmalloc_attachment *attach = db_attach->priv; + /* stealing dmabuf mutex to serialize map/unmap operations */ + struct mutex *lock = &db_attach->dmabuf->lock; + struct sg_table *sgt; + + mutex_lock(lock); + + sgt = &attach->sgt; + /* return previously mapped sg table */ + if (attach->dma_dir == dma_dir) { + mutex_unlock(lock); + return sgt; + } + + /* release any previous cache */ + if (attach->dma_dir != DMA_NONE) { + dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, + attach->dma_dir); + attach->dma_dir = DMA_NONE; + } + + /* mapping to the client with new direction */ + sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, + dma_dir); + if (!sgt->nents) { + pr_err("failed to map scatterlist\n"); + mutex_unlock(lock); + return ERR_PTR(-EIO); + } + + attach->dma_dir = dma_dir; + + mutex_unlock(lock); + + return sgt; +} + +static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, + struct sg_table *sgt, enum dma_data_direction dma_dir) +{ + /* nothing to be done here */ +} + +static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf) +{ + /* drop reference obtained in vb2_vmalloc_get_dmabuf */ + vb2_vmalloc_put(dbuf->priv); +} + +static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum) +{ + struct vb2_vmalloc_buf *buf = dbuf->priv; + + return buf->vaddr + pgnum * PAGE_SIZE; +} + +static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf) +{ + struct vb2_vmalloc_buf *buf = dbuf->priv; + + return buf->vaddr; +} + +static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf, + struct vm_area_struct *vma) +{ + return vb2_vmalloc_mmap(dbuf->priv, vma); +} + +static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { + .attach = vb2_vmalloc_dmabuf_ops_attach, + .detach = vb2_vmalloc_dmabuf_ops_detach, + .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, + .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap, + .kmap = vb2_vmalloc_dmabuf_ops_kmap, + .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap, + .vmap = vb2_vmalloc_dmabuf_ops_vmap, + .mmap = vb2_vmalloc_dmabuf_ops_mmap, + .release = vb2_vmalloc_dmabuf_ops_release, +}; + +static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags) +{ + struct vb2_vmalloc_buf *buf = buf_priv; + struct dma_buf *dbuf; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &vb2_vmalloc_dmabuf_ops; + exp_info.size = buf->size; + exp_info.flags = flags; + exp_info.priv = buf; + + if (WARN_ON(!buf->vaddr)) + return NULL; + + dbuf = dma_buf_export(&exp_info); + if (IS_ERR(dbuf)) + return NULL; + + /* dmabuf keeps reference to vb2 buffer */ + atomic_inc(&buf->refcount); + + return dbuf; +} +#endif /* CONFIG_HAS_DMA */ + + /*********************************************/ /* callbacks for DMABUF buffers */ /*********************************************/ @@ -240,7 +404,7 @@ } static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, - unsigned long size, int write) + unsigned long size, enum dma_data_direction dma_dir) { struct vb2_vmalloc_buf *buf; @@ -252,7 +416,7 @@ return ERR_PTR(-ENOMEM); buf->dbuf = dbuf; - buf->write = write; + buf->dma_dir = dma_dir; buf->size = size; return buf; @@ -264,6 +428,9 @@ .put = vb2_vmalloc_put, .get_userptr = vb2_vmalloc_get_userptr, .put_userptr = 
vb2_vmalloc_put_userptr, +#ifdef CONFIG_HAS_DMA + .get_dmabuf = vb2_vmalloc_get_dmabuf, +#endif .map_dmabuf = vb2_vmalloc_map_dmabuf, .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf, .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
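
The hunks above rework the vmalloc allocator's vb2_mem_ops: get_userptr/put_userptr now pin user pages through a frame_vector instead of open-coded get_user_pages(), the old 'write' flag is replaced by an enum dma_data_direction, and (under CONFIG_HAS_DMA) the allocator gains a DMA-BUF exporter through the new .get_dmabuf hook. Below is a minimal sketch, not part of the patch, of how a capture driver would opt into this allocator so that the reworked ops are exercised; it assumes the 3.10-era vb2 headers used in this tree, and my_vb2_ops / my_init_vb2_queue are placeholder names for the driver's own callbacks and setup helper.

#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>

/* Driver-provided queue_setup/buf_queue/etc. callbacks (placeholder). */
extern const struct vb2_ops my_vb2_ops;

static int my_init_vb2_queue(struct vb2_queue *q, void *drv_priv)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	/* USERPTR exercises the frame_vector path, DMABUF the new exporter. */
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	q->drv_priv = drv_priv;
	q->ops = &my_vb2_ops;
	q->mem_ops = &vb2_vmalloc_memops;	/* ops table extended above */

	return vb2_queue_init(q);
}

The vb2 core derives the dma_data_direction it passes to these ops from the queue type (capture queues map to DMA_FROM_DEVICE, output queues to DMA_TO_DEVICE), which is why the allocator no longer needs the old per-buffer write flag.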