--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/exynos/exynos_drm_ipp.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/exynos/exynos_drm_ipp.c 2021-02-04 17:41:59.000000000 +0000 @@ -12,12 +12,10 @@ * */ #include -#include #include #include #include #include -#include #include #include @@ -47,9 +45,6 @@ #define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev)) #define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M) -/* platform device pointer for ipp device. */ -static struct platform_device *exynos_drm_ipp_pdev; - /* * A structure of event. * @@ -77,7 +72,6 @@ u32 prop_id; u32 buf_id; struct drm_exynos_ipp_buf_info buf_info; - struct drm_file *filp; }; /* @@ -105,37 +99,8 @@ static DEFINE_MUTEX(exynos_drm_ippdrv_lock); static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list); -int exynos_platform_device_ipp_register(void) -{ - struct platform_device *pdev; - - if (exynos_drm_ipp_pdev) - return -EEXIST; - - pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - exynos_drm_ipp_pdev = pdev; - - return 0; -} - -void exynos_platform_device_ipp_unregister(void) -{ - if (exynos_drm_ipp_pdev) { - platform_device_unregister(exynos_drm_ipp_pdev); - exynos_drm_ipp_pdev = NULL; - } -} - int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) { - DRM_DEBUG_KMS("%s\n", __func__); - - if (!ippdrv) - return -EINVAL; - mutex_lock(&exynos_drm_ippdrv_lock); list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list); mutex_unlock(&exynos_drm_ippdrv_lock); @@ -145,11 +110,6 @@ int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) { - DRM_DEBUG_KMS("%s\n", __func__); - - if (!ippdrv) - return -EINVAL; - mutex_lock(&exynos_drm_ippdrv_lock); list_del(&ippdrv->drv_list); mutex_unlock(&exynos_drm_ippdrv_lock); @@ -157,57 +117,47 @@ return 0; } -static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj, - u32 *idp) +static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj) { int ret; - DRM_DEBUG_KMS("%s\n", __func__); - - /* do the allocation under our mutexlock */ mutex_lock(lock); ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); mutex_unlock(lock); - if (ret < 0) - return ret; - *idp = ret; - return 0; + return ret; +} + +static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id) +{ + mutex_lock(lock); + idr_remove(id_idr, id); + mutex_unlock(lock); } static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id) { void *obj; - DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id); - mutex_lock(lock); - - /* find object using handle */ obj = idr_find(id_idr, id); - if (!obj) { - DRM_ERROR("failed to find object.\n"); - mutex_unlock(lock); - return ERR_PTR(-ENODEV); - } - mutex_unlock(lock); return obj; } -static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv, - enum drm_exynos_ipp_cmd cmd) +static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv, + struct drm_exynos_ipp_property *property) { - /* - * check dedicated flag and WB, OUTPUT operation with - * power on state. 
- */ - if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) && - !pm_runtime_suspended(ippdrv->dev))) - return true; + if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) && + !pm_runtime_suspended(ippdrv->dev))) + return -EBUSY; - return false; + if (ippdrv->check_property && + ippdrv->check_property(ippdrv->dev, property)) + return -EINVAL; + + return 0; } static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx, @@ -215,63 +165,30 @@ { struct exynos_drm_ippdrv *ippdrv; u32 ipp_id = property->ipp_id; - - DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id); + int ret; if (ipp_id) { - /* find ipp driver using idr */ - ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, - ipp_id); - if (IS_ERR(ippdrv)) { - DRM_ERROR("not found ipp%d driver.\n", ipp_id); - return ippdrv; - } - - /* - * WB, OUTPUT opertion not supported multi-operation. - * so, make dedicated state at set property ioctl. - * when ipp driver finished operations, clear dedicated flags. - */ - if (ipp_check_dedicated(ippdrv, property->cmd)) { - DRM_ERROR("already used choose device.\n"); - return ERR_PTR(-EBUSY); + ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id); + if (!ippdrv) { + DRM_DEBUG("ipp%d driver not found\n", ipp_id); + return ERR_PTR(-ENODEV); } - /* - * This is necessary to find correct device in ipp drivers. - * ipp drivers have different abilities, - * so need to check property. - */ - if (ippdrv->check_property && - ippdrv->check_property(ippdrv->dev, property)) { - DRM_ERROR("not support property.\n"); - return ERR_PTR(-EINVAL); + ret = ipp_check_driver(ippdrv, property); + if (ret < 0) { + DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret); + return ERR_PTR(ret); } return ippdrv; } else { - /* - * This case is search all ipp driver for finding. - * user application don't set ipp_id in this case, - * so ipp subsystem search correct driver in driver list. - */ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - if (ipp_check_dedicated(ippdrv, property->cmd)) { - DRM_DEBUG_KMS("%s:used device.\n", __func__); - continue; - } - - if (ippdrv->check_property && - ippdrv->check_property(ippdrv->dev, property)) { - DRM_DEBUG_KMS("%s:not support property.\n", - __func__); - continue; - } - - return ippdrv; + ret = ipp_check_driver(ippdrv, property); + if (ret == 0) + return ippdrv; } - DRM_ERROR("not support ipp driver operations.\n"); + DRM_DEBUG("cannot find driver suitable for given property.\n"); } return ERR_PTR(-ENODEV); @@ -283,27 +200,24 @@ struct drm_exynos_ipp_cmd_node *c_node; int count = 0; - DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id); - - if (list_empty(&exynos_drm_ippdrv_list)) { - DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__); - return ERR_PTR(-ENODEV); - } + DRM_DEBUG_KMS("prop_id[%d]\n", prop_id); /* * This case is search ipp driver by prop_id handle. * sometimes, ipp subsystem find driver by prop_id. - * e.g PAUSE state, queue buf, command contro. + * e.g PAUSE state, queue buf, command control. 
*/ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__, - count++, (int)ippdrv); + DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); - if (!list_empty(&ippdrv->cmd_list)) { - list_for_each_entry(c_node, &ippdrv->cmd_list, list) - if (c_node->property.prop_id == prop_id) - return ippdrv; + mutex_lock(&ippdrv->cmd_lock); + list_for_each_entry(c_node, &ippdrv->cmd_list, list) { + if (c_node->property.prop_id == prop_id) { + mutex_unlock(&ippdrv->cmd_lock); + return ippdrv; + } } + mutex_unlock(&ippdrv->cmd_lock); } return ERR_PTR(-ENODEV); @@ -313,15 +227,12 @@ struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; - struct device *dev = priv->dev; + struct device *dev = file_priv->ipp_dev; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_prop_list *prop_list = data; struct exynos_drm_ippdrv *ippdrv; int count = 0; - DRM_DEBUG_KMS("%s\n", __func__); - if (!ctx) { DRM_ERROR("invalid context.\n"); return -EINVAL; @@ -332,11 +243,12 @@ return -EINVAL; } - DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id); + DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id); if (!prop_list->ipp_id) { list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) count++; + /* * Supports ippdrv list count for user application. * First step user application getting ippdrv count. @@ -346,7 +258,7 @@ } else { /* * Getting ippdrv capability by ipp_id. - * some deivce not supported wb, output interface. + * some device not supported wb, output interface. * so, user application detect correct ipp driver * using this ioctl. */ @@ -355,10 +267,10 @@ if (!ippdrv) { DRM_ERROR("not found ipp%d driver.\n", prop_list->ipp_id); - return -EINVAL; + return -ENODEV; } - prop_list = ippdrv->prop_list; + *prop_list = ippdrv->prop_list; } return 0; @@ -371,60 +283,21 @@ struct drm_exynos_pos *pos = &config->pos; struct drm_exynos_sz *sz = &config->sz; - DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n", - __func__, property->prop_id, idx ? "dst" : "src", config->fmt); + DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n", + property->prop_id, idx ? "dst" : "src", config->fmt); - DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n", - __func__, pos->x, pos->y, pos->w, pos->h, + DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n", + pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize, config->flip, config->degree); } -static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property) -{ - struct exynos_drm_ippdrv *ippdrv; - struct drm_exynos_ipp_cmd_node *c_node; - u32 prop_id = property->prop_id; - - DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id); - - ippdrv = ipp_find_drv_by_handle(prop_id); - if (IS_ERR(ippdrv)) { - DRM_ERROR("failed to get ipp driver.\n"); - return -EINVAL; - } - - /* - * Find command node using command list in ippdrv. - * when we find this command no using prop_id. - * return property information set in this command node. 
- */ - list_for_each_entry(c_node, &ippdrv->cmd_list, list) { - if ((c_node->property.prop_id == prop_id) && - (c_node->state == IPP_STATE_STOP)) { - DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n", - __func__, property->cmd, (int)ippdrv); - - c_node->property = *property; - return 0; - } - } - - DRM_ERROR("failed to search property.\n"); - - return -EINVAL; -} - static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) { struct drm_exynos_ipp_cmd_work *cmd_work; - DRM_DEBUG_KMS("%s\n", __func__); - cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); - if (!cmd_work) { - DRM_ERROR("failed to alloc cmd_work.\n"); + if (!cmd_work) return ERR_PTR(-ENOMEM); - } INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); @@ -435,15 +308,11 @@ { struct drm_exynos_ipp_event_work *event_work; - DRM_DEBUG_KMS("%s\n", __func__); - event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); - if (!event_work) { - DRM_ERROR("failed to alloc event_work.\n"); + if (!event_work) return ERR_PTR(-ENOMEM); - } - INIT_WORK((struct work_struct *)event_work, ipp_sched_event); + INIT_WORK(&event_work->work, ipp_sched_event); return event_work; } @@ -452,16 +321,14 @@ struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; - struct device *dev = priv->dev; + struct device *dev = file_priv->ipp_dev; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_property *property = data; struct exynos_drm_ippdrv *ippdrv; struct drm_exynos_ipp_cmd_node *c_node; + u32 prop_id; int ret, i; - DRM_DEBUG_KMS("%s\n", __func__); - if (!ctx) { DRM_ERROR("invalid context.\n"); return -EINVAL; @@ -472,6 +339,8 @@ return -EINVAL; } + prop_id = property->prop_id; + /* * This is log print for user application property. * user application set various property. @@ -480,14 +349,24 @@ ipp_print_property(property, i); /* - * set property ioctl generated new prop_id. - * but in this case already asigned prop_id using old set property. - * e.g PAUSE state. this case supports find current prop_id and use it - * instead of allocation. + * In case prop_id is not zero try to set existing property. 
*/ - if (property->prop_id) { - DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); - return ipp_find_and_set_property(property); + if (prop_id) { + c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id); + + if (!c_node || c_node->filp != file) { + DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id); + return -EINVAL; + } + + if (c_node->state != IPP_STATE_STOP) { + DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id); + return -EINVAL; + } + + c_node->property = *property; + + return 0; } /* find ipp driver using ipp id */ @@ -499,46 +378,46 @@ /* allocate command node */ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); - if (!c_node) { - DRM_ERROR("failed to allocate map node.\n"); + if (!c_node) return -ENOMEM; - } - /* create property id */ - ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, - &property->prop_id); - if (ret) { + ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node); + if (ret < 0) { DRM_ERROR("failed to create id.\n"); goto err_clear; } + property->prop_id = ret; - DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", - __func__, property->prop_id, property->cmd, (int)ippdrv); + DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", + property->prop_id, property->cmd, (int)ippdrv); /* stored property information and ippdrv in private data */ - c_node->priv = priv; c_node->property = *property; c_node->state = IPP_STATE_IDLE; + c_node->filp = file; c_node->start_work = ipp_create_cmd_work(); if (IS_ERR(c_node->start_work)) { DRM_ERROR("failed to create start work.\n"); - goto err_clear; + ret = PTR_ERR(c_node->start_work); + goto err_remove_id; } c_node->stop_work = ipp_create_cmd_work(); if (IS_ERR(c_node->stop_work)) { DRM_ERROR("failed to create stop work.\n"); + ret = PTR_ERR(c_node->stop_work); goto err_free_start; } c_node->event_work = ipp_create_event_work(); if (IS_ERR(c_node->event_work)) { DRM_ERROR("failed to create event work.\n"); + ret = PTR_ERR(c_node->event_work); goto err_free_stop; } - mutex_init(&c_node->cmd_lock); + mutex_init(&c_node->lock); mutex_init(&c_node->mem_lock); mutex_init(&c_node->event_lock); @@ -549,8 +428,9 @@ INIT_LIST_HEAD(&c_node->mem_list[i]); INIT_LIST_HEAD(&c_node->event_list); - list_splice_init(&priv->event_list, &c_node->event_list); + mutex_lock(&ippdrv->cmd_lock); list_add_tail(&c_node->list, &ippdrv->cmd_list); + mutex_unlock(&ippdrv->cmd_lock); /* make dedicated state without m2m */ if (!ipp_is_m2m_cmd(property->cmd)) @@ -562,253 +442,180 @@ kfree(c_node->stop_work); err_free_start: kfree(c_node->start_work); +err_remove_id: + ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id); err_clear: kfree(c_node); return ret; } -static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node) -{ - DRM_DEBUG_KMS("%s\n", __func__); - - /* delete list */ - list_del(&c_node->list); - - /* destroy mutex */ - mutex_destroy(&c_node->cmd_lock); - mutex_destroy(&c_node->mem_lock); - mutex_destroy(&c_node->event_lock); - - /* free command node */ - kfree(c_node->start_work); - kfree(c_node->stop_work); - kfree(c_node->event_work); - kfree(c_node); -} - -static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node) -{ - struct drm_exynos_ipp_property *property = &c_node->property; - struct drm_exynos_ipp_mem_node *m_node; - struct list_head *head; - int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, }; - - DRM_DEBUG_KMS("%s\n", __func__); +static int ipp_validate_mem_node(struct drm_device *drm_dev, + struct drm_exynos_ipp_mem_node *m_node, + struct drm_exynos_ipp_cmd_node 
*c_node) +{ + struct drm_exynos_ipp_config *ipp_cfg; + unsigned int num_plane; + unsigned long size, buf_size = 0, plane_size, img_size = 0; + unsigned int bpp, width, height; + int i; - mutex_lock(&c_node->mem_lock); + ipp_cfg = &c_node->property.config[m_node->ops_id]; + num_plane = drm_format_num_planes(ipp_cfg->fmt); - for_each_ipp_ops(i) { - /* source/destination memory list */ - head = &c_node->mem_list[i]; + /** + * This is a rather simplified validation of a memory node. + * It basically verifies provided gem object handles + * and the buffer sizes with respect to current configuration. + * This is not the best that can be done + * but it seems more than enough + */ + for (i = 0; i < num_plane; ++i) { + width = ipp_cfg->sz.hsize; + height = ipp_cfg->sz.vsize; + bpp = drm_format_plane_cpp(ipp_cfg->fmt, i); - if (list_empty(head)) { - DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__, - i ? "dst" : "src"); - continue; - } + /* + * The result of drm_format_plane_cpp() for chroma planes must + * be used with drm_format_xxxx_chroma_subsampling() for + * correct result. + */ + if (i > 0) { + width /= drm_format_horz_chroma_subsampling( + ipp_cfg->fmt); + height /= drm_format_vert_chroma_subsampling( + ipp_cfg->fmt); + } + plane_size = width * height * bpp; + img_size += plane_size; + + if (m_node->buf_info.handles[i]) { + size = exynos_drm_gem_get_size(drm_dev, + m_node->buf_info.handles[i], + c_node->filp); + if (plane_size > size) { + DRM_ERROR( + "buffer %d is smaller than required\n", + i); + return -EINVAL; + } - /* find memory node entry */ - list_for_each_entry(m_node, head, list) { - DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__, - i ? "dst" : "src", count[i], (int)m_node); - count[i]++; + buf_size += size; } } - DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__, - min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]), - max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST])); - - /* - * M2M operations should be need paired memory address. - * so, need to check minimum count about src, dst. 
- * other case not use paired memory, so use maximum count - */ - if (ipp_is_m2m_cmd(property->cmd)) - ret = min(count[EXYNOS_DRM_OPS_SRC], - count[EXYNOS_DRM_OPS_DST]); - else - ret = max(count[EXYNOS_DRM_OPS_SRC], - count[EXYNOS_DRM_OPS_DST]); - - mutex_unlock(&c_node->mem_lock); - - return ret; -} - -static struct drm_exynos_ipp_mem_node - *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node, - struct drm_exynos_ipp_queue_buf *qbuf) -{ - struct drm_exynos_ipp_mem_node *m_node; - struct list_head *head; - int count = 0; - - DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id); - - /* source/destination memory list */ - head = &c_node->mem_list[qbuf->ops_id]; - - /* find memory node from memory list */ - list_for_each_entry(m_node, head, list) { - DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n", - __func__, count++, (int)m_node); - - /* compare buffer id */ - if (m_node->buf_id == qbuf->buf_id) - return m_node; + if (buf_size < img_size) { + DRM_ERROR("size of buffers(%lu) is smaller than image(%lu)\n", + buf_size, img_size); + return -EINVAL; } - return NULL; + return 0; } -static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, +static int ipp_put_mem_node(struct drm_device *drm_dev, struct drm_exynos_ipp_cmd_node *c_node, struct drm_exynos_ipp_mem_node *m_node) { - struct exynos_drm_ipp_ops *ops = NULL; - int ret = 0; + int i; - DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node); + DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); if (!m_node) { - DRM_ERROR("invalid queue node.\n"); + DRM_ERROR("invalid dequeue node.\n"); return -EFAULT; } - mutex_lock(&c_node->mem_lock); + DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); - DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id); - - /* get operations callback */ - ops = ippdrv->ops[m_node->ops_id]; - if (!ops) { - DRM_ERROR("not support ops.\n"); - ret = -EFAULT; - goto err_unlock; + /* put gem buffer */ + for_each_ipp_planar(i) { + unsigned long handle = m_node->buf_info.handles[i]; + if (handle) + exynos_drm_gem_put_dma_addr(drm_dev, handle, + c_node->filp); } - /* set address and enable irq */ - if (ops->set_addr) { - ret = ops->set_addr(ippdrv->dev, &m_node->buf_info, - m_node->buf_id, IPP_BUF_ENQUEUE); - if (ret) { - DRM_ERROR("failed to set addr.\n"); - goto err_unlock; - } - } + list_del(&m_node->list); + kfree(m_node); -err_unlock: - mutex_unlock(&c_node->mem_lock); - return ret; + return 0; } static struct drm_exynos_ipp_mem_node *ipp_get_mem_node(struct drm_device *drm_dev, - struct drm_file *file, struct drm_exynos_ipp_cmd_node *c_node, struct drm_exynos_ipp_queue_buf *qbuf) { struct drm_exynos_ipp_mem_node *m_node; - struct drm_exynos_ipp_buf_info buf_info; - void *addr; + struct drm_exynos_ipp_buf_info *buf_info; int i; - DRM_DEBUG_KMS("%s\n", __func__); - - mutex_lock(&c_node->mem_lock); - m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); - if (!m_node) { - DRM_ERROR("failed to allocate queue node.\n"); - goto err_unlock; - } + if (!m_node) + return ERR_PTR(-ENOMEM); - /* clear base address for error handling */ - memset(&buf_info, 0x0, sizeof(buf_info)); + buf_info = &m_node->buf_info; /* operations, buffer id */ m_node->ops_id = qbuf->ops_id; m_node->prop_id = qbuf->prop_id; m_node->buf_id = qbuf->buf_id; + INIT_LIST_HEAD(&m_node->list); - DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__, - (int)m_node, qbuf->ops_id); - DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__, - qbuf->prop_id, m_node->buf_id); + DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); + 
DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); for_each_ipp_planar(i) { - DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__, - i, qbuf->handle[i]); + DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]); /* get dma address by handle */ if (qbuf->handle[i]) { + dma_addr_t *addr; + addr = exynos_drm_gem_get_dma_addr(drm_dev, - qbuf->handle[i], file); + qbuf->handle[i], c_node->filp); if (IS_ERR(addr)) { DRM_ERROR("failed to get addr.\n"); - goto err_clear; + ipp_put_mem_node(drm_dev, c_node, m_node); + return ERR_PTR(-EFAULT); } - buf_info.handles[i] = qbuf->handle[i]; - buf_info.base[i] = *(dma_addr_t *) addr; - DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n", - __func__, i, buf_info.base[i], - (int)buf_info.handles[i]); + buf_info->handles[i] = qbuf->handle[i]; + buf_info->base[i] = *addr; + DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, + buf_info->base[i], buf_info->handles[i]); } } - m_node->filp = file; - m_node->buf_info = buf_info; + mutex_lock(&c_node->mem_lock); + if (ipp_validate_mem_node(drm_dev, m_node, c_node)) { + ipp_put_mem_node(drm_dev, c_node, m_node); + mutex_unlock(&c_node->mem_lock); + return ERR_PTR(-EFAULT); + } list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); - mutex_unlock(&c_node->mem_lock); - return m_node; -err_clear: - kfree(m_node); -err_unlock: - mutex_unlock(&c_node->mem_lock); - return ERR_PTR(-EFAULT); + return m_node; } -static int ipp_put_mem_node(struct drm_device *drm_dev, - struct drm_exynos_ipp_cmd_node *c_node, - struct drm_exynos_ipp_mem_node *m_node) +static void ipp_clean_mem_nodes(struct drm_device *drm_dev, + struct drm_exynos_ipp_cmd_node *c_node, int ops) { - int i; - - DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node); - - if (!m_node) { - DRM_ERROR("invalid dequeue node.\n"); - return -EFAULT; - } - - if (list_empty(&m_node->list)) { - DRM_ERROR("empty memory node.\n"); - return -ENOMEM; - } + struct drm_exynos_ipp_mem_node *m_node, *tm_node; + struct list_head *head = &c_node->mem_list[ops]; mutex_lock(&c_node->mem_lock); - DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id); + list_for_each_entry_safe(m_node, tm_node, head, list) { + int ret; - /* put gem buffer */ - for_each_ipp_planar(i) { - unsigned long handle = m_node->buf_info.handles[i]; - if (handle) - exynos_drm_gem_put_dma_addr(drm_dev, handle, - m_node->filp); + ret = ipp_put_mem_node(drm_dev, c_node, m_node); + if (ret) + DRM_ERROR("failed to put m_node.\n"); } - /* delete list in queue */ - list_del(&m_node->list); - kfree(m_node); - mutex_unlock(&c_node->mem_lock); - - return 0; } static void ipp_free_event(struct drm_pending_event *event) @@ -817,22 +624,18 @@ } static int ipp_get_event(struct drm_device *drm_dev, - struct drm_file *file, struct drm_exynos_ipp_cmd_node *c_node, struct drm_exynos_ipp_queue_buf *qbuf) { struct drm_exynos_ipp_send_event *e; unsigned long flags; - DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__, - qbuf->ops_id, qbuf->buf_id); + DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) { - DRM_ERROR("failed to allocate event.\n"); spin_lock_irqsave(&drm_dev->event_lock, flags); - file->event_space += sizeof(e->event); + c_node->filp->event_space += sizeof(e->event); spin_unlock_irqrestore(&drm_dev->event_lock, flags); return -ENOMEM; } @@ -844,9 +647,11 @@ e->event.prop_id = qbuf->prop_id; e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id; e->base.event = &e->event.base; - e->base.file_priv = file; + e->base.file_priv = 
c_node->filp; e->base.destroy = ipp_free_event; + mutex_lock(&c_node->event_lock); list_add_tail(&e->base.link, &c_node->event_list); + mutex_unlock(&c_node->event_lock); return 0; } @@ -857,19 +662,12 @@ struct drm_exynos_ipp_send_event *e, *te; int count = 0; - DRM_DEBUG_KMS("%s\n", __func__); - - if (list_empty(&c_node->event_list)) { - DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__); - return; - } - + mutex_lock(&c_node->event_lock); list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { - DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n", - __func__, count++, (int)e); + DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); /* - * quf == NULL condition means all event deletion. + * qbuf == NULL condition means all event deletion. * stop operations want to delete all event list. * another case delete only same buf id. */ @@ -885,9 +683,122 @@ /* delete list */ list_del(&e->base.link); kfree(e); - return; + goto out_unlock; + } + } + +out_unlock: + mutex_unlock(&c_node->event_lock); + return; +} + +static void ipp_clean_cmd_node(struct ipp_context *ctx, + struct drm_exynos_ipp_cmd_node *c_node) +{ + int i; + + /* cancel works */ + cancel_work_sync(&c_node->start_work->work); + cancel_work_sync(&c_node->stop_work->work); + cancel_work_sync(&c_node->event_work->work); + + /* put event */ + ipp_put_event(c_node, NULL); + + for_each_ipp_ops(i) + ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i); + + /* delete list */ + list_del(&c_node->list); + + ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, + c_node->property.prop_id); + + /* destroy mutex */ + mutex_destroy(&c_node->lock); + mutex_destroy(&c_node->mem_lock); + mutex_destroy(&c_node->event_lock); + + /* free command node */ + kfree(c_node->start_work); + kfree(c_node->stop_work); + kfree(c_node->event_work); + kfree(c_node); +} + +static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node) +{ + switch (c_node->property.cmd) { + case IPP_CMD_WB: + return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]); + case IPP_CMD_OUTPUT: + return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]); + case IPP_CMD_M2M: + default: + return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) && + !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]); + } +} + +static struct drm_exynos_ipp_mem_node + *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node, + struct drm_exynos_ipp_queue_buf *qbuf) +{ + struct drm_exynos_ipp_mem_node *m_node; + struct list_head *head; + int count = 0; + + DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id); + + /* source/destination memory list */ + head = &c_node->mem_list[qbuf->ops_id]; + + /* find memory node from memory list */ + list_for_each_entry(m_node, head, list) { + DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); + + /* compare buffer id */ + if (m_node->buf_id == qbuf->buf_id) + return m_node; + } + + return NULL; +} + +static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, + struct drm_exynos_ipp_cmd_node *c_node, + struct drm_exynos_ipp_mem_node *m_node) +{ + struct exynos_drm_ipp_ops *ops = NULL; + int ret = 0; + + DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); + + if (!m_node) { + DRM_ERROR("invalid queue node.\n"); + return -EFAULT; + } + + DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); + + /* get operations callback */ + ops = ippdrv->ops[m_node->ops_id]; + if (!ops) { + DRM_ERROR("not support ops.\n"); + return -EFAULT; + } + + /* set address and enable irq */ + if (ops->set_addr) { + ret = ops->set_addr(ippdrv->dev, &m_node->buf_info, + m_node->buf_id, IPP_BUF_ENQUEUE); + if 
(ret) { + DRM_ERROR("failed to set addr.\n"); + return ret; } } + + return ret; } static void ipp_handle_cmd_work(struct device *dev, @@ -899,7 +810,7 @@ cmd_work->ippdrv = ippdrv; cmd_work->c_node = c_node; - queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work); + queue_work(ctx->cmd_workq, &cmd_work->work); } static int ipp_queue_buf_with_run(struct device *dev, @@ -912,8 +823,6 @@ struct exynos_drm_ipp_ops *ops; int ret; - DRM_DEBUG_KMS("%s\n", __func__); - ippdrv = ipp_find_drv_by_handle(qbuf->prop_id); if (IS_ERR(ippdrv)) { DRM_ERROR("failed to get ipp driver.\n"); @@ -929,12 +838,14 @@ property = &c_node->property; if (c_node->state != IPP_STATE_START) { - DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__); + DRM_DEBUG_KMS("bypass for invalid state.\n"); return 0; } + mutex_lock(&c_node->mem_lock); if (!ipp_check_mem_list(c_node)) { - DRM_DEBUG_KMS("%s:empty memory.\n", __func__); + mutex_unlock(&c_node->mem_lock); + DRM_DEBUG_KMS("empty memory.\n"); return 0; } @@ -950,10 +861,12 @@ } else { ret = ipp_set_mem_node(ippdrv, c_node, m_node); if (ret) { + mutex_unlock(&c_node->mem_lock); DRM_ERROR("failed to set m node.\n"); return ret; } } + mutex_unlock(&c_node->mem_lock); return 0; } @@ -964,33 +877,28 @@ { struct drm_exynos_ipp_mem_node *m_node, *tm_node; - DRM_DEBUG_KMS("%s\n", __func__); - - if (!list_empty(&c_node->mem_list[qbuf->ops_id])) { - /* delete list */ - list_for_each_entry_safe(m_node, tm_node, - &c_node->mem_list[qbuf->ops_id], list) { - if (m_node->buf_id == qbuf->buf_id && - m_node->ops_id == qbuf->ops_id) - ipp_put_mem_node(drm_dev, c_node, m_node); - } + /* delete list */ + mutex_lock(&c_node->mem_lock); + list_for_each_entry_safe(m_node, tm_node, + &c_node->mem_list[qbuf->ops_id], list) { + if (m_node->buf_id == qbuf->buf_id && + m_node->ops_id == qbuf->ops_id) + ipp_put_mem_node(drm_dev, c_node, m_node); } + mutex_unlock(&c_node->mem_lock); } int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; - struct device *dev = priv->dev; + struct device *dev = file_priv->ipp_dev; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_queue_buf *qbuf = data; struct drm_exynos_ipp_cmd_node *c_node; struct drm_exynos_ipp_mem_node *m_node; int ret; - DRM_DEBUG_KMS("%s\n", __func__); - if (!qbuf) { DRM_ERROR("invalid buf parameter.\n"); return -EINVAL; @@ -1001,23 +909,23 @@ return -EINVAL; } - DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n", - __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src", + DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n", + qbuf->prop_id, qbuf->ops_id ? 
"dst" : "src", qbuf->buf_id, qbuf->buf_type); /* find command node */ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, qbuf->prop_id); - if (!c_node) { + if (!c_node || c_node->filp != file) { DRM_ERROR("failed to get command node.\n"); - return -EFAULT; + return -ENODEV; } /* buffer control */ switch (qbuf->buf_type) { case IPP_BUF_ENQUEUE: /* get memory node */ - m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf); + m_node = ipp_get_mem_node(drm_dev, c_node, qbuf); if (IS_ERR(m_node)) { DRM_ERROR("failed to get m_node.\n"); return PTR_ERR(m_node); @@ -1030,7 +938,7 @@ */ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) { /* get event for destination buffer */ - ret = ipp_get_event(drm_dev, file, c_node, qbuf); + ret = ipp_get_event(drm_dev, c_node, qbuf); if (ret) { DRM_ERROR("failed to get event.\n"); goto err_clean_node; @@ -1048,7 +956,7 @@ } break; case IPP_BUF_DEQUEUE: - mutex_lock(&c_node->cmd_lock); + mutex_lock(&c_node->lock); /* put event for destination buffer */ if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) @@ -1056,7 +964,7 @@ ipp_clean_queue_buf(drm_dev, c_node, qbuf); - mutex_unlock(&c_node->cmd_lock); + mutex_unlock(&c_node->lock); break; default: DRM_ERROR("invalid buffer control.\n"); @@ -1075,8 +983,6 @@ static bool exynos_drm_ipp_check_valid(struct device *dev, enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state) { - DRM_DEBUG_KMS("%s\n", __func__); - if (ctrl != IPP_CTRL_PLAY) { if (pm_runtime_suspended(dev)) { DRM_ERROR("pm:runtime_suspended.\n"); @@ -1104,7 +1010,6 @@ default: DRM_ERROR("invalid state.\n"); goto err_status; - break; } return true; @@ -1118,16 +1023,13 @@ struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; struct exynos_drm_ippdrv *ippdrv = NULL; - struct device *dev = priv->dev; + struct device *dev = file_priv->ipp_dev; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data; struct drm_exynos_ipp_cmd_work *cmd_work; struct drm_exynos_ipp_cmd_node *c_node; - DRM_DEBUG_KMS("%s\n", __func__); - if (!ctx) { DRM_ERROR("invalid context.\n"); return -EINVAL; @@ -1138,7 +1040,7 @@ return -EINVAL; } - DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__, + DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n", cmd_ctrl->ctrl, cmd_ctrl->prop_id); ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id); @@ -1149,9 +1051,9 @@ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, cmd_ctrl->prop_id); - if (!c_node) { + if (!c_node || c_node->filp != file) { DRM_ERROR("invalid command node list.\n"); - return -EINVAL; + return -ENODEV; } if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, @@ -1164,12 +1066,12 @@ case IPP_CTRL_PLAY: if (pm_runtime_suspended(ippdrv->dev)) pm_runtime_get_sync(ippdrv->dev); + c_node->state = IPP_STATE_START; cmd_work = c_node->start_work; cmd_work->ctrl = cmd_ctrl->ctrl; ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); - c_node->state = IPP_STATE_START; break; case IPP_CTRL_STOP: cmd_work = c_node->stop_work; @@ -1184,10 +1086,12 @@ c_node->state = IPP_STATE_STOP; ippdrv->dedicated = false; - ipp_clean_cmd_node(c_node); + mutex_lock(&ippdrv->cmd_lock); + ipp_clean_cmd_node(ctx, c_node); if (list_empty(&ippdrv->cmd_list)) pm_runtime_put_sync(ippdrv->dev); + mutex_unlock(&ippdrv->cmd_lock); break; case IPP_CTRL_PAUSE: cmd_work = c_node->stop_work; @@ -1213,7 +1117,7 @@ return -EINVAL; } - DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__, + DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n", 
cmd_ctrl->ctrl, cmd_ctrl->prop_id); return 0; @@ -1249,12 +1153,11 @@ return -EINVAL; } - DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); + DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); /* reset h/w block */ if (ippdrv->reset && ippdrv->reset(ippdrv->dev)) { - DRM_ERROR("failed to reset.\n"); return -EINVAL; } @@ -1272,30 +1175,24 @@ /* set format */ if (ops->set_fmt) { ret = ops->set_fmt(ippdrv->dev, config->fmt); - if (ret) { - DRM_ERROR("not support format.\n"); + if (ret) return ret; - } } /* set transform for rotation, flip */ if (ops->set_transf) { ret = ops->set_transf(ippdrv->dev, config->degree, config->flip, &swap); - if (ret) { - DRM_ERROR("not support tranf.\n"); - return -EINVAL; - } + if (ret) + return ret; } /* set size */ if (ops->set_size) { ret = ops->set_size(ippdrv->dev, swap, &config->pos, &config->sz); - if (ret) { - DRM_ERROR("not support size.\n"); + if (ret) return ret; - } } } @@ -1310,14 +1207,16 @@ struct list_head *head; int ret, i; - DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); + DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); /* store command info in ippdrv */ ippdrv->c_node = c_node; + mutex_lock(&c_node->mem_lock); if (!ipp_check_mem_list(c_node)) { - DRM_DEBUG_KMS("%s:empty memory.\n", __func__); - return -ENOMEM; + DRM_DEBUG_KMS("empty memory.\n"); + ret = -ENOMEM; + goto err_unlock; } /* set current property in ippdrv */ @@ -1325,7 +1224,7 @@ if (ret) { DRM_ERROR("failed to set property.\n"); ippdrv->c_node = NULL; - return ret; + goto err_unlock; } /* check command */ @@ -1337,19 +1236,13 @@ m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - if (!m_node) { - DRM_ERROR("failed to get node.\n"); - ret = -EFAULT; - return ret; - } - DRM_DEBUG_KMS("%s:m_node[0x%x]\n", - __func__, (int)m_node); + DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); ret = ipp_set_mem_node(ippdrv, c_node, m_node); if (ret) { DRM_ERROR("failed to set m node.\n"); - return ret; + goto err_unlock; } } break; @@ -1361,7 +1254,7 @@ ret = ipp_set_mem_node(ippdrv, c_node, m_node); if (ret) { DRM_ERROR("failed to set m node.\n"); - return ret; + goto err_unlock; } } break; @@ -1373,126 +1266,79 @@ ret = ipp_set_mem_node(ippdrv, c_node, m_node); if (ret) { DRM_ERROR("failed to set m node.\n"); - return ret; + goto err_unlock; } } break; default: DRM_ERROR("invalid operations.\n"); - return -EINVAL; + ret = -EINVAL; + goto err_unlock; } + mutex_unlock(&c_node->mem_lock); - DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd); + DRM_DEBUG_KMS("cmd[%d]\n", property->cmd); /* start operations */ if (ippdrv->start) { ret = ippdrv->start(ippdrv->dev, property->cmd); if (ret) { DRM_ERROR("failed to start ops.\n"); + ippdrv->c_node = NULL; return ret; } } return 0; + +err_unlock: + mutex_unlock(&c_node->mem_lock); + ippdrv->c_node = NULL; + return ret; } static int ipp_stop_property(struct drm_device *drm_dev, struct exynos_drm_ippdrv *ippdrv, struct drm_exynos_ipp_cmd_node *c_node) { - struct drm_exynos_ipp_mem_node *m_node, *tm_node; struct drm_exynos_ipp_property *property = &c_node->property; - struct list_head *head; - int ret = 0, i; + int i; - DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id); + DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); - /* put event */ - ipp_put_event(c_node, NULL); + /* stop operations */ + if (ippdrv->stop) + ippdrv->stop(ippdrv->dev, property->cmd); /* check command */ switch (property->cmd) { case IPP_CMD_M2M: - for_each_ipp_ops(i) { - /* source/destination memory list */ - head = 
&c_node->mem_list[i]; - - if (list_empty(head)) { - DRM_DEBUG_KMS("%s:mem_list is empty.\n", - __func__); - break; - } - - list_for_each_entry_safe(m_node, tm_node, - head, list) { - ret = ipp_put_mem_node(drm_dev, c_node, - m_node); - if (ret) { - DRM_ERROR("failed to put m_node.\n"); - goto err_clear; - } - } - } + for_each_ipp_ops(i) + ipp_clean_mem_nodes(drm_dev, c_node, i); break; case IPP_CMD_WB: - /* destination memory list */ - head = &c_node->mem_list[EXYNOS_DRM_OPS_DST]; - - if (list_empty(head)) { - DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__); - break; - } - - list_for_each_entry_safe(m_node, tm_node, head, list) { - ret = ipp_put_mem_node(drm_dev, c_node, m_node); - if (ret) { - DRM_ERROR("failed to put m_node.\n"); - goto err_clear; - } - } + ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST); break; case IPP_CMD_OUTPUT: - /* source memory list */ - head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; - - if (list_empty(head)) { - DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__); - break; - } - - list_for_each_entry_safe(m_node, tm_node, head, list) { - ret = ipp_put_mem_node(drm_dev, c_node, m_node); - if (ret) { - DRM_ERROR("failed to put m_node.\n"); - goto err_clear; - } - } + ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC); break; default: DRM_ERROR("invalid operations.\n"); - ret = -EINVAL; - goto err_clear; + return -EINVAL; } -err_clear: - /* stop operations */ - if (ippdrv->stop) - ippdrv->stop(ippdrv->dev, property->cmd); - - return ret; + return 0; } void ipp_sched_cmd(struct work_struct *work) { struct drm_exynos_ipp_cmd_work *cmd_work = - (struct drm_exynos_ipp_cmd_work *)work; + container_of(work, struct drm_exynos_ipp_cmd_work, work); struct exynos_drm_ippdrv *ippdrv; struct drm_exynos_ipp_cmd_node *c_node; struct drm_exynos_ipp_property *property; int ret; - DRM_DEBUG_KMS("%s\n", __func__); - ippdrv = cmd_work->ippdrv; if (!ippdrv) { DRM_ERROR("invalid ippdrv list.\n"); @@ -1505,7 +1351,7 @@ return; } - mutex_lock(&c_node->cmd_lock); + mutex_lock(&c_node->lock); property = &c_node->property; @@ -1550,10 +1396,10 @@ break; } - DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl); + DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl); err_unlock: - mutex_unlock(&c_node->cmd_lock); + mutex_unlock(&c_node->lock); } static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv, @@ -1571,8 +1417,7 @@ int ret, i; for_each_ipp_ops(i) - DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__, - i ? "dst" : "src", buf_id[i]); + DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]); if (!drm_dev) { DRM_ERROR("failed to get drm_dev.\n"); @@ -1584,14 +1429,18 @@ return -EINVAL; } + mutex_lock(&c_node->event_lock); if (list_empty(&c_node->event_list)) { - DRM_DEBUG_KMS("%s:event list is empty.\n", __func__); - return 0; + DRM_DEBUG_KMS("event list is empty.\n"); + ret = 0; + goto err_event_unlock; } + mutex_lock(&c_node->mem_lock); if (!ipp_check_mem_list(c_node)) { - DRM_DEBUG_KMS("%s:empty memory.\n", __func__); - return 0; + DRM_DEBUG_KMS("empty memory.\n"); + ret = 0; + goto err_mem_unlock; } /* check command */ @@ -1603,13 +1452,9 @@ m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - if (!m_node) { - DRM_ERROR("empty memory node.\n"); - return -ENOMEM; - } tbuf_id[i] = m_node->buf_id; - DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__, + DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? 
"dst" : "src", tbuf_id[i]); ret = ipp_put_mem_node(drm_dev, c_node, m_node); @@ -1627,7 +1472,8 @@ m_node = ipp_find_mem_node(c_node, &qbuf); if (!m_node) { DRM_ERROR("empty memory node.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_mem_unlock; } tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id; @@ -1642,10 +1488,6 @@ m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - if (!m_node) { - DRM_ERROR("empty memory node.\n"); - return -ENOMEM; - } tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; @@ -1655,8 +1497,10 @@ break; default: DRM_ERROR("invalid operations.\n"); - return -EINVAL; + ret = -EINVAL; + goto err_mem_unlock; } + mutex_unlock(&c_node->mem_lock); if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST]) DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", @@ -1671,14 +1515,8 @@ e = list_first_entry(&c_node->event_list, struct drm_exynos_ipp_send_event, base.link); - if (!e) { - DRM_ERROR("empty event.\n"); - return -EINVAL; - } - do_gettimeofday(&now); - DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n" - , __func__, now.tv_sec, now.tv_usec); + DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec); e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; e->event.prop_id = property->prop_id; @@ -1691,17 +1529,24 @@ list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); spin_unlock_irqrestore(&drm_dev->event_lock, flags); + mutex_unlock(&c_node->event_lock); - DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__, + DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); return 0; + +err_mem_unlock: + mutex_unlock(&c_node->mem_lock); +err_event_unlock: + mutex_unlock(&c_node->event_lock); + return ret; } void ipp_sched_event(struct work_struct *work) { struct drm_exynos_ipp_event_work *event_work = - (struct drm_exynos_ipp_event_work *)work; + container_of(work, struct drm_exynos_ipp_event_work, work); struct exynos_drm_ippdrv *ippdrv; struct drm_exynos_ipp_cmd_node *c_node; int ret; @@ -1711,8 +1556,7 @@ return; } - DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, - event_work->buf_id[EXYNOS_DRM_OPS_DST]); + DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]); ippdrv = event_work->ippdrv; if (!ippdrv) { @@ -1733,13 +1577,11 @@ * or going out operations. 
*/ if (c_node->state != IPP_STATE_START) { - DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n", - __func__, c_node->state, c_node->property.prop_id); + DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n", + c_node->state, c_node->property.prop_id); goto err_completion; } - mutex_lock(&c_node->event_lock); - ret = ipp_send_event(ippdrv, c_node, event_work->buf_id); if (ret) { DRM_ERROR("failed to send event.\n"); @@ -1749,8 +1591,6 @@ err_completion: if (ipp_is_m2m_cmd(c_node->property.cmd)) complete(&c_node->start_complete); - - mutex_unlock(&c_node->event_lock); } static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) @@ -1759,27 +1599,19 @@ struct exynos_drm_ippdrv *ippdrv; int ret, count = 0; - DRM_DEBUG_KMS("%s\n", __func__); - /* get ipp driver entry */ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { ippdrv->drm_dev = drm_dev; - ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv, - &ippdrv->ipp_id); - if (ret) { + ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv); + if (ret < 0) { DRM_ERROR("failed to create id.\n"); - goto err_idr; + goto err; } + ippdrv->prop_list.ipp_id = ret; - DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__, - count++, (int)ippdrv, ippdrv->ipp_id); - - if (ippdrv->ipp_id == 0) { - DRM_ERROR("failed to get ipp_id[%d]\n", - ippdrv->ipp_id); - goto err_idr; - } + DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", + count++, (int)ippdrv, ret); /* store parent device for node */ ippdrv->parent_dev = dev; @@ -1788,40 +1620,41 @@ ippdrv->event_workq = ctx->event_workq; ippdrv->sched_event = ipp_sched_event; INIT_LIST_HEAD(&ippdrv->cmd_list); + mutex_init(&ippdrv->cmd_lock); - if (is_drm_iommu_supported(drm_dev)) { - ret = drm_iommu_attach_device(drm_dev, ippdrv->dev); - if (ret) { - DRM_ERROR("failed to activate iommu\n"); - goto err_iommu; - } + ret = drm_iommu_attach_device(drm_dev, ippdrv->dev); + if (ret) { + DRM_ERROR("failed to activate iommu\n"); + goto err; } } return 0; -err_iommu: +err: /* get ipp driver entry */ - list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list) - if (is_drm_iommu_supported(drm_dev)) - drm_iommu_detach_device(drm_dev, ippdrv->dev); + list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list, + drv_list) { + drm_iommu_detach_device(drm_dev, ippdrv->dev); + + ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, + ippdrv->prop_list.ipp_id); + } -err_idr: - idr_destroy(&ctx->ipp_idr); - idr_destroy(&ctx->prop_idr); return ret; } static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) { - struct exynos_drm_ippdrv *ippdrv; - - DRM_DEBUG_KMS("%s\n", __func__); + struct exynos_drm_ippdrv *ippdrv, *t; + struct ipp_context *ctx = get_ipp_context(dev); /* get ipp driver entry */ - list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - if (is_drm_iommu_supported(drm_dev)) - drm_iommu_detach_device(drm_dev, ippdrv->dev); + list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) { + drm_iommu_detach_device(drm_dev, ippdrv->dev); + + ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, + ippdrv->prop_list.ipp_id); ippdrv->drm_dev = NULL; exynos_drm_ippdrv_unregister(ippdrv); @@ -1832,21 +1665,10 @@ struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv; - DRM_DEBUG_KMS("%s\n", __func__); + file_priv->ipp_dev = dev; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - DRM_ERROR("failed to allocate priv.\n"); - return -ENOMEM; - } - priv->dev = dev; - 
file_priv->ipp_priv = priv; - - INIT_LIST_HEAD(&priv->event_list); - - DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv); + DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev); return 0; } @@ -1854,29 +1676,19 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, struct drm_file *file) { - struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; struct exynos_drm_ippdrv *ippdrv = NULL; + struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_cmd_node *c_node, *tc_node; int count = 0; - DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv); - - if (list_empty(&exynos_drm_ippdrv_list)) { - DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__); - goto err_clear; - } - list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - if (list_empty(&ippdrv->cmd_list)) - continue; - + mutex_lock(&ippdrv->cmd_lock); list_for_each_entry_safe(c_node, tc_node, &ippdrv->cmd_list, list) { - DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", - __func__, count++, (int)ippdrv); + DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", + count++, (int)ippdrv); - if (c_node->priv == priv) { + if (c_node->filp == file) { /* * userland goto unnormal state. process killed. * and close the file. @@ -1890,15 +1702,14 @@ } ippdrv->dedicated = false; - ipp_clean_cmd_node(c_node); + ipp_clean_cmd_node(ctx, c_node); if (list_empty(&ippdrv->cmd_list)) pm_runtime_put_sync(ippdrv->dev); } } + mutex_unlock(&ippdrv->cmd_lock); } -err_clear: - kfree(priv); return; } @@ -1913,8 +1724,6 @@ if (!ctx) return -ENOMEM; - DRM_DEBUG_KMS("%s\n", __func__); - mutex_init(&ctx->ipp_lock); mutex_init(&ctx->prop_lock); @@ -1978,8 +1787,6 @@ { struct ipp_context *ctx = platform_get_drvdata(pdev); - DRM_DEBUG_KMS("%s\n", __func__); - /* unregister sub driver */ exynos_drm_subdrv_unregister(&ctx->subdrv); @@ -1997,71 +1804,12 @@ return 0; } -static int ipp_power_ctrl(struct ipp_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int ipp_suspend(struct device *dev) -{ - struct ipp_context *ctx = get_ipp_context(dev); - - DRM_DEBUG_KMS("%s\n", __func__); - - if (pm_runtime_suspended(dev)) - return 0; - - return ipp_power_ctrl(ctx, false); -} - -static int ipp_resume(struct device *dev) -{ - struct ipp_context *ctx = get_ipp_context(dev); - - DRM_DEBUG_KMS("%s\n", __func__); - - if (!pm_runtime_suspended(dev)) - return ipp_power_ctrl(ctx, true); - - return 0; -} -#endif - -#ifdef CONFIG_PM_RUNTIME -static int ipp_runtime_suspend(struct device *dev) -{ - struct ipp_context *ctx = get_ipp_context(dev); - - DRM_DEBUG_KMS("%s\n", __func__); - - return ipp_power_ctrl(ctx, false); -} - -static int ipp_runtime_resume(struct device *dev) -{ - struct ipp_context *ctx = get_ipp_context(dev); - - DRM_DEBUG_KMS("%s\n", __func__); - - return ipp_power_ctrl(ctx, true); -} -#endif - -static const struct dev_pm_ops ipp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume) - SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL) -}; - struct platform_driver ipp_driver = { .probe = ipp_probe, .remove = ipp_remove, .driver = { .name = "exynos-drm-ipp", .owner = THIS_MODULE, - .pm = &ipp_pm_ops, }, };
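
A note on the reworked id handling above: ipp_create_id() now returns the value produced by idr_alloc() directly (or a negative errno) instead of storing it through an out parameter, and the new ipp_remove_id() releases it again. A stand-alone sketch of that allocate/return/remove flow, using hypothetical demo_* names rather than the driver's helpers:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/types.h>

static DEFINE_IDR(demo_idr);
static DEFINE_MUTEX(demo_lock);

/* Returns the new id (>= 1) on success or a negative errno. */
static int demo_create_id(void *obj)
{
        int ret;

        mutex_lock(&demo_lock);
        ret = idr_alloc(&demo_idr, obj, 1, 0, GFP_KERNEL);
        mutex_unlock(&demo_lock);

        return ret;
}

static void demo_remove_id(u32 id)
{
        mutex_lock(&demo_lock);
        idr_remove(&demo_idr, id);
        mutex_unlock(&demo_lock);
}

Returning the id (or error) directly is what lets callers in the patch, e.g. exynos_drm_ipp_set_property(), assign property->prop_id = ret and unwind with ipp_remove_id() on later failures.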
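
Similarly, the hunks that change INIT_WORK((struct work_struct *)event_work, ...) to INIT_WORK(&event_work->work, ...), queue &cmd_work->work, and open ipp_sched_cmd()/ipp_sched_event() with container_of() all rely on the work_struct being embedded in the command/event wrapper rather than cast to and from it. A minimal sketch of that pattern with hypothetical ipp_demo_* names (not the driver's drm_exynos_ipp_*_work types):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/printk.h>

/* Wrapper carrying per-job data alongside the embedded work item. */
struct ipp_demo_work {
        struct work_struct work;        /* embedded, not a pointer */
        int buf_id;
};

static void ipp_demo_handler(struct work_struct *work)
{
        /* Recover the wrapper from the embedded member instead of casting. */
        struct ipp_demo_work *dw =
                container_of(work, struct ipp_demo_work, work);

        pr_info("buf_id[%d]\n", dw->buf_id);
        kfree(dw);
}

static int ipp_demo_queue(struct workqueue_struct *wq, int buf_id)
{
        struct ipp_demo_work *dw = kzalloc(sizeof(*dw), GFP_KERNEL);

        if (!dw)
                return -ENOMEM;

        dw->buf_id = buf_id;
        INIT_WORK(&dw->work, ipp_demo_handler);
        queue_work(wq, &dw->work);

        return 0;
}

container_of() keeps the lookup correct even if the work member is not the first field of the wrapper, which the old (struct work_struct *)work casts silently assumed.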