--- zzzz-none-000/linux-4.4.271/drivers/staging/android/ion/ion.c	2021-06-03 06:22:09.000000000 +0000
+++ maple-fsgw-759/linux-4.4.271/drivers/staging/android/ion/ion.c	2023-12-20 10:37:27.000000000 +0000
@@ -3,6 +3,7 @@
  * drivers/staging/android/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -16,7 +17,6 @@
  */

 #include
-#include
 #include
 #include
 #include
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -37,6 +38,10 @@
 #include
 #include
 #include
+#include
+#include
+#include
+
 #include "ion.h"
 #include "ion_priv.h"
@@ -87,7 +92,7 @@
     struct rb_root handles;
     struct idr idr;
     struct mutex lock;
-    const char *name;
+    char *name;
     char *display_name;
     int display_serial;
     struct task_struct *task;
@@ -109,6 +114,7 @@
  */
 struct ion_handle {
     struct kref ref;
+    unsigned int user_ref_count;
     struct ion_client *client;
     struct ion_buffer *buffer;
     struct rb_node node;
@@ -208,6 +214,8 @@

     buffer->dev = dev;
     buffer->size = len;
+    buffer->flags = flags;
+    INIT_LIST_HEAD(&buffer->vmas);

     table = heap->ops->map_dma(heap, buffer);
     if (WARN_ONCE(table == NULL,
@@ -238,9 +246,6 @@
         }
     }

-    buffer->dev = dev;
-    buffer->size = len;
-    INIT_LIST_HEAD(&buffer->vmas);
     mutex_init(&buffer->lock);
     /*
      * this will set up dma addresses for the sglist -- it is not
@@ -259,6 +264,7 @@
     mutex_lock(&dev->buffer_lock);
     ion_buffer_add(dev, buffer);
     mutex_unlock(&dev->buffer_lock);
+    atomic_long_add(len, &heap->total_allocated);
     return buffer;

 err:
@@ -275,6 +281,8 @@
     if (WARN_ON(buffer->kmap_cnt > 0))
         buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
     buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
+    atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
     buffer->heap->ops->free(buffer);
     vfree(buffer->pages);
     kfree(buffer);
@@ -286,6 +294,8 @@
     struct ion_heap *heap = buffer->heap;
     struct ion_device *dev = buffer->dev;

+    msm_dma_buf_freed(buffer);
+
     mutex_lock(&dev->buffer_lock);
     rb_erase(&buffer->node, &dev->buffers);
     mutex_unlock(&dev->buffer_lock);
@@ -309,6 +319,9 @@
 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
 {
     mutex_lock(&buffer->lock);
+    if (buffer->handle_count == 0)
+        atomic_long_add(buffer->size, &buffer->heap->total_handles);
+
     buffer->handle_count++;
     mutex_unlock(&buffer->lock);
 }
@@ -333,6 +346,7 @@
         task = current->group_leader;
         get_task_comm(buffer->task_comm, task);
         buffer->pid = task_pid_nr(task);
+        atomic_long_sub(buffer->size, &buffer->heap->total_handles);
     }
     mutex_unlock(&buffer->lock);
 }
@@ -419,6 +433,48 @@
     return ret;
 }

+/* Must hold the client lock */
+static void user_ion_handle_get(struct ion_handle *handle)
+{
+    if (handle->user_ref_count++ == 0)
+        kref_get(&handle->ref);
+}
+
+/* Must hold the client lock */
+static struct ion_handle *user_ion_handle_get_check_overflow(
+    struct ion_handle *handle)
+{
+    if (handle->user_ref_count + 1 == 0)
+        return ERR_PTR(-EOVERFLOW);
+    user_ion_handle_get(handle);
+    return handle;
+}
+
+/* passes a kref to the user ref count.
+ * We know we're holding a kref to the object before and
+ * after this call, so no need to reverify handle.
+ * Caller must hold the client lock, except for ION_IOC_ALLOC.
+ */
+static struct ion_handle *pass_to_user(struct ion_handle *handle)
+{
+    struct ion_handle *ret;
+
+    ret = user_ion_handle_get_check_overflow(handle);
+    ion_handle_put_nolock(handle);
+    return ret;
+}
+
+/* Must hold the client lock */
+static int user_ion_handle_put_nolock(struct ion_handle *handle)
+{
+    int ret = 0;
+
+    if (--handle->user_ref_count == 0)
+        ret = ion_handle_put_nolock(handle);
+
+    return ret;
+}
+
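The user_ref_count machinery above separates the references userspace holds through the ioctl ABI from kernel-internal kref references: the first user reference takes exactly one kref, later ones only bump the counter, and the last user put drops that single kref, so userspace can never release references the kernel still needs. A standalone model of that invariant, with illustrative names only (this is not kernel code):

#include <assert.h>

struct obj {
    int krefs;              /* stands in for handle->ref (struct kref) */
    unsigned int user_refs; /* stands in for handle->user_ref_count */
};

static void user_get(struct obj *o)
{
    if (o->user_refs++ == 0)
        o->krefs++;         /* kref_get() */
}

static void user_put(struct obj *o)
{
    if (--o->user_refs == 0)
        o->krefs--;         /* ion_handle_put_nolock() */
}

int main(void)
{
    struct obj o = { .krefs = 1, .user_refs = 0 }; /* one kernel ref held */

    user_get(&o);           /* first user ref pins one kref */
    user_get(&o);           /* second one does not */
    assert(o.krefs == 2);
    user_put(&o);
    user_put(&o);           /* last user ref drops its kref */
    assert(o.krefs == 1);
    return 0;
}
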
 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                             struct ion_buffer *buffer)
 {
@@ -437,8 +493,8 @@
     return ERR_PTR(-EINVAL);
 }

-static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
-                                                      int id)
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+                                               int id)
 {
     struct ion_handle *handle;

@@ -449,8 +505,7 @@
     return ERR_PTR(-EINVAL);
 }

-static bool ion_handle_validate(struct ion_client *client,
-                                struct ion_handle *handle)
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
 {
     WARN_ON(!mutex_is_locked(&client->lock));
     return idr_find(&client->idr, handle->id) == handle;
@@ -487,15 +542,28 @@
     return 0;
 }

-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+static struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
                              size_t align, unsigned int heap_id_mask,
-                             unsigned int flags)
+                             unsigned int flags, bool grab_handle)
 {
     struct ion_handle *handle;
     struct ion_device *dev = client->dev;
     struct ion_buffer *buffer = NULL;
     struct ion_heap *heap;
     int ret;
+    const unsigned int MAX_DBG_STR_LEN = 64;
+    char dbg_str[MAX_DBG_STR_LEN];
+    unsigned int dbg_str_idx = 0;
+
+    dbg_str[0] = '\0';
+
+    /*
+     * For now, we don't want to fault in pages individually since
+     * clients are already doing manual cache maintenance. In
+     * other words, the implicit caching infrastructure is in
+     * place (in code) but should not be used.
+     */
+    flags |= ION_FLAG_CACHED_NEEDS_SYNC;

     pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
              len, align, heap_id_mask, flags);
@@ -515,17 +583,49 @@
         /* if the caller didn't specify this heap id */
         if (!((1 << heap->id) & heap_id_mask))
             continue;
+        trace_ion_alloc_buffer_start(client->name, heap->name, len,
+                                     heap_id_mask, flags);
         buffer = ion_buffer_create(heap, dev, len, align, flags);
+        trace_ion_alloc_buffer_end(client->name, heap->name, len,
+                                   heap_id_mask, flags);
         if (!IS_ERR(buffer))
             break;
+
+        trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
+                                        heap_id_mask, flags,
+                                        PTR_ERR(buffer));
+        if (dbg_str_idx < MAX_DBG_STR_LEN) {
+            unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
+            int ret_value = snprintf(&dbg_str[dbg_str_idx],
+                                     len_left, "%s ", heap->name);
+
+            if (ret_value >= len_left) {
+                /* overflow */
+                dbg_str[MAX_DBG_STR_LEN-1] = '\0';
+                dbg_str_idx = MAX_DBG_STR_LEN;
+            } else if (ret_value >= 0) {
+                dbg_str_idx += ret_value;
+            } else {
+                /* error */
+                dbg_str[MAX_DBG_STR_LEN-1] = '\0';
+            }
+        }
     }
     up_read(&dev->lock);

-    if (buffer == NULL)
+    if (buffer == NULL) {
+        trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+                                    heap_id_mask, flags, -ENODEV);
         return ERR_PTR(-ENODEV);
+    }

-    if (IS_ERR(buffer))
+    if (IS_ERR(buffer)) {
+        trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+                                    heap_id_mask, flags,
+                                    PTR_ERR(buffer));
+        pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
+                 len, align, dbg_str, client->name);
         return ERR_CAST(buffer);
+    }

     handle = ion_handle_create(client, buffer);
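__ion_alloc() above records the name of every heap it tried in a fixed 64-byte dbg_str, relying on snprintf()'s return value (the length that would have been written) to detect truncation and finalize the buffer instead of overrunning it. The same bounded-append pattern as a userspace demonstration, with made-up heap names:

#include <stdio.h>

#define MAX_DBG_STR_LEN 64

int main(void)
{
    const char *heaps[] = { "system", "carveout", "cma" };
    char dbg_str[MAX_DBG_STR_LEN] = "";
    unsigned int idx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        unsigned int left = MAX_DBG_STR_LEN - idx - 1;
        int n = snprintf(&dbg_str[idx], left, "%s ", heaps[i]);

        if (n < 0 || (unsigned int)n >= left) {
            dbg_str[MAX_DBG_STR_LEN - 1] = '\0'; /* truncated */
            break;
        }
        idx += n;       /* append point for the next name */
    }
    printf("heaps tried: %s\n", dbg_str);
    return 0;
}
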
@@ -539,6 +639,8 @@
         return handle;

     mutex_lock(&client->lock);
+    if (grab_handle)
+        ion_handle_get(handle);
     ret = ion_handle_add(client, handle);
     mutex_unlock(&client->lock);
     if (ret) {
@@ -548,16 +650,22 @@

     return handle;
 }
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+                             size_t align, unsigned int heap_id_mask,
+                             unsigned int flags)
+{
+    return __ion_alloc(client, len, align, heap_id_mask, flags, false);
+}
 EXPORT_SYMBOL(ion_alloc);

-static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
 {
     bool valid_handle;

     BUG_ON(client != handle->client);

     valid_handle = ion_handle_validate(client, handle);
-
     if (!valid_handle) {
         WARN(1, "%s: invalid handle passed to free.\n", __func__);
         return;
@@ -565,6 +673,25 @@
     ion_handle_put_nolock(handle);
 }

+static void user_ion_free_nolock(struct ion_client *client,
+                                 struct ion_handle *handle)
+{
+    bool valid_handle;
+
+    WARN_ON(client != handle->client);
+
+    valid_handle = ion_handle_validate(client, handle);
+    if (!valid_handle) {
+        WARN(1, "%s: invalid handle passed to free.\n", __func__);
+        return;
+    }
+    if (handle->user_ref_count == 0) {
+        WARN(1, "%s: User does not have access!\n", __func__);
+        return;
+    }
+    user_ion_handle_put_nolock(handle);
+}
+
 void ion_free(struct ion_client *client, struct ion_handle *handle)
 {
     BUG_ON(client != handle->client);
@@ -575,15 +702,17 @@
 }
 EXPORT_SYMBOL(ion_free);

-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-             ion_phys_addr_t *addr, size_t *len)
+static int __ion_phys(struct ion_client *client, struct ion_handle *handle,
+                      ion_phys_addr_t *addr, size_t *len, bool lock_client)
 {
     struct ion_buffer *buffer;
     int ret;

-    mutex_lock(&client->lock);
+    if (lock_client)
+        mutex_lock(&client->lock);
     if (!ion_handle_validate(client, handle)) {
-        mutex_unlock(&client->lock);
+        if (lock_client)
+            mutex_unlock(&client->lock);
         return -EINVAL;
     }

@@ -592,15 +721,29 @@
     if (!buffer->heap->ops->phys) {
         pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
                __func__, buffer->heap->name, buffer->heap->type);
-        mutex_unlock(&client->lock);
+        if (lock_client)
+            mutex_unlock(&client->lock);
         return -ENODEV;
     }
-    mutex_unlock(&client->lock);
+    if (lock_client)
+        mutex_unlock(&client->lock);
     ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
     return ret;
 }
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+             ion_phys_addr_t *addr, size_t *len)
+{
+    return __ion_phys(client, handle, addr, len, true);
+}
 EXPORT_SYMBOL(ion_phys);

+int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
+                    ion_phys_addr_t *addr, size_t *len)
+{
+    return __ion_phys(client, handle, addr, len, false);
+}
+
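ion_phys() and the new ion_phys_nolock() now share a single body, with a lock_client flag selecting whether client->lock is acquired or assumed to be held by the caller; __ion_import_dma_buf() later in this patch follows the same convention. A minimal kernel-style sketch of that shape, with hypothetical obj_* names:

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(obj_lock);
static int obj_state;

/* one implementation; the flag says who owns the locking */
static int __obj_get_state(bool lock_it)
{
    int v;

    if (lock_it)
        mutex_lock(&obj_lock);
    v = obj_state;
    if (lock_it)
        mutex_unlock(&obj_lock);
    return v;
}

/* for external callers: takes the lock itself */
int obj_get_state(void)
{
    return __obj_get_state(true);
}

/* for callers that already hold obj_lock */
int obj_get_state_nolock(void)
{
    return __obj_get_state(false);
}
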
 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
     void *vaddr;
@@ -701,32 +844,66 @@
 }
 EXPORT_SYMBOL(ion_unmap_kernel);

+static struct mutex debugfs_mutex;
+static struct rb_root *ion_root_client;
+static int is_client_alive(struct ion_client *client)
+{
+    struct rb_node *node;
+    struct ion_client *tmp;
+    struct ion_device *dev;
+
+    node = ion_root_client->rb_node;
+    dev = container_of(ion_root_client, struct ion_device, clients);
+
+    down_read(&dev->lock);
+    while (node) {
+        tmp = rb_entry(node, struct ion_client, node);
+        if (client < tmp) {
+            node = node->rb_left;
+        } else if (client > tmp) {
+            node = node->rb_right;
+        } else {
+            up_read(&dev->lock);
+            return 1;
+        }
+    }
+
+    up_read(&dev->lock);
+    return 0;
+}
+
 static int ion_debug_client_show(struct seq_file *s, void *unused)
 {
     struct ion_client *client = s->private;
     struct rb_node *n;
-    size_t sizes[ION_NUM_HEAP_IDS] = {0};
-    const char *names[ION_NUM_HEAP_IDS] = {NULL};
-    int i;
+
+    mutex_lock(&debugfs_mutex);
+    if (!is_client_alive(client)) {
+        seq_printf(s, "ion_client 0x%pK dead, can't dump its buffers\n",
+                   client);
+        mutex_unlock(&debugfs_mutex);
+        return 0;
+    }
+
+    seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
+               "heap_name", "size_in_bytes", "handle refcount",
+               "buffer");

     mutex_lock(&client->lock);
     for (n = rb_first(&client->handles); n; n = rb_next(n)) {
         struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                              node);
-        unsigned int id = handle->buffer->heap->id;

-        if (!names[id])
-            names[id] = handle->buffer->heap->name;
-        sizes[id] += handle->buffer->size;
-    }
-    mutex_unlock(&client->lock);
+        seq_printf(s, "%16.16s: %16zx : %16d : %12pK",
+                   handle->buffer->heap->name,
+                   handle->buffer->size,
+                   atomic_read(&handle->ref.refcount),
+                   handle->buffer);

-    seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
-    for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
-        if (!names[i])
-            continue;
-        seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+        seq_printf(s, "\n");
     }
+    mutex_unlock(&client->lock);
+    mutex_unlock(&debugfs_mutex);
     return 0;
 }

@@ -797,6 +974,7 @@
     client->handles = RB_ROOT;
     idr_init(&client->idr);
     mutex_init(&client->lock);
+    client->task = task;
     client->pid = pid;
     client->name = kstrdup(name, GFP_KERNEL);
@@ -855,7 +1033,7 @@
     struct ion_device *dev = client->dev;
     struct rb_node *n;

-    pr_debug("%s: %d\n", __func__, __LINE__);
+    mutex_lock(&debugfs_mutex);
     while ((n = rb_first(&client->handles))) {
         struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                              node);
@@ -869,14 +1047,67 @@
     put_task_struct(client->task);
     rb_erase(&client->node, &dev->clients);
     debugfs_remove_recursive(client->debug_root);
+
     up_write(&dev->lock);

     kfree(client->display_name);
     kfree(client->name);
     kfree(client);
+    mutex_unlock(&debugfs_mutex);
 }
 EXPORT_SYMBOL(ion_client_destroy);

+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+                         unsigned long *flags)
+{
+    struct ion_buffer *buffer;
+
+    mutex_lock(&client->lock);
+    if (!ion_handle_validate(client, handle)) {
+        pr_err("%s: invalid handle passed to %s.\n",
+               __func__, __func__);
+        mutex_unlock(&client->lock);
+        return -EINVAL;
+    }
+    buffer = handle->buffer;
+    mutex_lock(&buffer->lock);
+    *flags = buffer->flags;
+    mutex_unlock(&buffer->lock);
+    mutex_unlock(&client->lock);
+
+    return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_flags);
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+                        size_t *size)
+{
+    struct ion_buffer *buffer;
+
+    mutex_lock(&client->lock);
+    if (!ion_handle_validate(client, handle)) {
+        pr_err("%s: invalid handle passed to %s.\n",
+               __func__, __func__);
+        mutex_unlock(&client->lock);
+        return -EINVAL;
+    }
+    buffer = handle->buffer;
+    mutex_lock(&buffer->lock);
+    *size = buffer->size;
+    mutex_unlock(&buffer->lock);
+    mutex_unlock(&client->lock);
+
+    return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_size);
+
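ion_handle_get_flags() and ion_handle_get_size() validate the handle under client->lock and then read the buffer fields under buffer->lock. A hypothetical kernel-side caller combining the two (client and handle are assumed to come from ion_client_create() and ion_alloc()):

#include <linux/printk.h>

static void dump_buffer_info(struct ion_client *client,
                             struct ion_handle *handle)
{
    unsigned long flags;
    size_t size;

    if (ion_handle_get_flags(client, handle, &flags) ||
        ion_handle_get_size(client, handle, &size))
        return;     /* handle was not valid for this client */

    pr_info("ion buffer: %zu bytes, flags 0x%lx\n", size, flags);
}
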
+/**
+ * ion_sg_table - get an sg_table for the buffer
+ *
+ * NOTE: most likely you should NOT be using this API.
+ * You should be using Ion as a DMA Buf exporter and using
+ * the sg_table returned by dma_buf_map_attachment.
+ */
 struct sg_table *ion_sg_table(struct ion_client *client,
                               struct ion_handle *handle)
 {
@@ -897,6 +1128,60 @@
 }
 EXPORT_SYMBOL(ion_sg_table);

+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+                                             size_t chunk_size,
+                                             size_t total_size)
+{
+    struct sg_table *table;
+    int i, n_chunks, ret;
+    struct scatterlist *sg;
+
+    table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+    if (!table)
+        return ERR_PTR(-ENOMEM);
+
+    n_chunks = DIV_ROUND_UP(total_size, chunk_size);
+    pr_debug("creating sg_table with %d chunks\n", n_chunks);
+
+    ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
+    if (ret)
+        goto err0;
+
+    for_each_sg(table->sgl, sg, table->nents, i) {
+        dma_addr_t addr = buffer_base + i * chunk_size;
+
+        sg_dma_address(sg) = addr;
+        sg->length = chunk_size;
+    }
+
+    return table;
+err0:
+    kfree(table);
+    return ERR_PTR(ret);
+}
+
+static struct sg_table *ion_dupe_sg_table(struct sg_table *orig_table)
+{
+    int ret, i;
+    struct scatterlist *sg, *sg_orig;
+    struct sg_table *table;
+
+    table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+    if (!table)
+        return NULL;
+
+    ret = sg_alloc_table(table, orig_table->nents, GFP_KERNEL);
+    if (ret) {
+        kfree(table);
+        return NULL;
+    }
+
+    sg_orig = orig_table->sgl;
+    for_each_sg(table->sgl, sg, table->nents, i) {
+        memcpy(sg, sg_orig, sizeof(*sg));
+        sg_orig = sg_next(sg_orig);
+    }
+    return table;
+}
+
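ion_dupe_sg_table() exists because ion_map_dma_buf() below hands each dma-buf attachment its own copy of the scatterlist: importers that run dma_map_sg() on the returned table write per-device DMA addresses into it, so two devices mapping the same buffer must not share one table, and ion_unmap_dma_buf() frees the duplicate. The importer side of that contract, sketched with hypothetical dev/dmabuf arguments:

#include <linux/dma-buf.h>
#include <linux/err.h>

static int importer_map_example(struct device *dev, struct dma_buf *dmabuf)
{
    struct dma_buf_attachment *att;
    struct sg_table *sgt;

    att = dma_buf_attach(dmabuf, dev);
    if (IS_ERR(att))
        return PTR_ERR(att);

    sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
    if (IS_ERR(sgt)) {
        dma_buf_detach(dmabuf, att);
        return PTR_ERR(sgt);
    }

    /* this attachment now owns its own duplicated table */

    dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
    dma_buf_detach(dmabuf, att);
    return 0;
}
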
 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                        struct device *dev,
                                        enum dma_data_direction direction);

@@ -906,15 +1191,22 @@
 {
     struct dma_buf *dmabuf = attachment->dmabuf;
     struct ion_buffer *buffer = dmabuf->priv;
+    struct sg_table *table;
+
+    table = ion_dupe_sg_table(buffer->sg_table);
+    if (!table)
+        return NULL;

     ion_buffer_sync_for_device(buffer, attachment->dev, direction);
-    return buffer->sg_table;
+    return table;
 }

 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                               struct sg_table *table,
                               enum dma_data_direction direction)
 {
+    sg_free_table(table);
+    kfree(table);
 }

 void ion_pages_sync_for_device(struct device *dev, struct page *page,
@@ -922,6 +1214,8 @@
 {
     struct scatterlist sg;

+    WARN_ONCE(!dev, "A device is required for dma_sync\n");
+
     sg_init_table(&sg, 1);
     sg_set_page(&sg, page, size, 0);
     /*
@@ -946,9 +1240,6 @@
     int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
     int i;

-    pr_debug("%s: syncing for device %s\n", __func__,
-             dev ? dev_name(dev) : "null");
-
     if (!ion_buffer_fault_user_mappings(buffer))
         return;

@@ -1002,7 +1293,6 @@
     mutex_lock(&buffer->lock);
     list_add(&vma_list->list, &buffer->vmas);
     mutex_unlock(&buffer->lock);
-    pr_debug("%s: adding %p\n", __func__, vma);
 }

 static void ion_vm_close(struct vm_area_struct *vma)
@@ -1010,17 +1300,18 @@
     struct ion_buffer *buffer = vma->vm_private_data;
     struct ion_vma_list *vma_list, *tmp;

-    pr_debug("%s\n", __func__);
     mutex_lock(&buffer->lock);
     list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
         if (vma_list->vma != vma)
             continue;
         list_del(&vma_list->list);
         kfree(vma_list);
-        pr_debug("%s: deleting %p\n", __func__, vma);
         break;
     }
     mutex_unlock(&buffer->lock);
+
+    if (buffer->heap->ops->unmap_user)
+        buffer->heap->ops->unmap_user(buffer->heap, buffer);
 }

 static const struct vm_operations_struct ion_vma_ops = {
@@ -1045,6 +1336,7 @@
             VM_DONTDUMP;
     vma->vm_private_data = buffer;
     vma->vm_ops = &ion_vma_ops;
+    vma->vm_flags |= VM_MIXEDMAP;
     ion_vm_open(vma);
     return 0;
 }
@@ -1183,7 +1475,6 @@
     fd = dma_buf_fd(dmabuf, O_CLOEXEC);
     if (fd < 0)
         dma_buf_put(dmabuf);
-
     return fd;
 }

@@ -1193,13 +1484,36 @@
 }
 EXPORT_SYMBOL(ion_share_dma_buf_fd);

+bool ion_dma_buf_is_secure(struct dma_buf *dmabuf)
+{
+    struct ion_buffer *buffer;
+    enum ion_heap_type type;
+
+    /* Return false if we didn't create the buffer */
+    if (!dmabuf || dmabuf->ops != &dma_buf_ops)
+        return false;
+
+    buffer = dmabuf->priv;
+
+    if (!buffer || !buffer->heap)
+        return false;
+
+    type = buffer->heap->type;
+
+    return (type == (enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA ||
+            type == (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE) ?
+            true : false;
+}
+EXPORT_SYMBOL(ion_dma_buf_is_secure);
+
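ion_dma_buf_is_secure() lets other kernel code ask whether a dma-buf is backed by one of the secure heap types before touching its contents; checking dmabuf->ops against ion's own dma_buf_ops also guards against foreign exporters. A hypothetical caller rejecting CPU access to secure buffers:

#include <linux/dma-buf.h>
#include <linux/err.h>

/* fd is assumed to be an ion-exported dma-buf file descriptor */
static int check_cpu_access_ok(int fd)
{
    struct dma_buf *dmabuf = dma_buf_get(fd);
    int ret = 0;

    if (IS_ERR(dmabuf))
        return PTR_ERR(dmabuf);

    if (ion_dma_buf_is_secure(dmabuf))
        ret = -EPERM;   /* secure memory: CPU must not map it */

    dma_buf_put(dmabuf);
    return ret;
}
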
 static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
                                        struct ion_handle *handle)
 {
     return __ion_share_dma_buf_fd(client, handle, false);
 }

-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+static struct ion_handle *__ion_import_dma_buf(struct ion_client *client,
+                                               int fd, bool lock_client)
 {
     struct dma_buf *dmabuf;
     struct ion_buffer *buffer;
@@ -1219,25 +1533,32 @@
     }
     buffer = dmabuf->priv;

-    mutex_lock(&client->lock);
+    if (lock_client)
+        mutex_lock(&client->lock);
     /* if a handle exists for this buffer just take a reference to it */
     handle = ion_handle_lookup(client, buffer);
     if (!IS_ERR(handle)) {
         handle = ion_handle_get_check_overflow(handle);
-        mutex_unlock(&client->lock);
+        if (lock_client)
+            mutex_unlock(&client->lock);
         goto end;
     }

     handle = ion_handle_create(client, buffer);
     if (IS_ERR(handle)) {
-        mutex_unlock(&client->lock);
+        if (lock_client)
+            mutex_unlock(&client->lock);
         goto end;
     }

     ret = ion_handle_add(client, handle);
-    mutex_unlock(&client->lock);
+    if (lock_client)
+        mutex_unlock(&client->lock);
     if (ret) {
-        ion_handle_put(handle);
+        if (lock_client)
+            ion_handle_put(handle);
+        else
+            ion_handle_put_nolock(handle);
         handle = ERR_PTR(ret);
     }

@@ -1245,12 +1566,24 @@
     dma_buf_put(dmabuf);
     return handle;
 }
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+    return __ion_import_dma_buf(client, fd, true);
+}
 EXPORT_SYMBOL(ion_import_dma_buf);

+struct ion_handle *ion_import_dma_buf_nolock(struct ion_client *client, int fd)
+{
+    return __ion_import_dma_buf(client, fd, false);
+}
+
 static int ion_sync_for_device(struct ion_client *client, int fd)
 {
     struct dma_buf *dmabuf;
     struct ion_buffer *buffer;
+    struct ion_heap *heap;
+    struct device *dev;

     dmabuf = dma_buf_get(fd);
     if (IS_ERR(dmabuf))
@@ -1264,8 +1597,15 @@
         return -EINVAL;
     }
     buffer = dmabuf->priv;
+    heap = buffer->heap;
+    dev = heap->priv;

-    dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+    if (get_secure_vmid(buffer->flags) > 0) {
+        pr_err("%s: cannot sync a secure dmabuf\n", __func__);
+        dma_buf_put(dmabuf);
+        return -EINVAL;
+    }
+    dma_sync_sg_for_device(dev, buffer->sg_table->sgl,
                            buffer->sg_table->nents, DMA_BIDIRECTIONAL);
     dma_buf_put(dmabuf);
     return 0;
@@ -1313,13 +1653,13 @@
     {
         struct ion_handle *handle;

-        handle = ion_alloc(client, data.allocation.len,
+        handle = __ion_alloc(client, data.allocation.len,
                            data.allocation.align,
                            data.allocation.heap_id_mask,
-                           data.allocation.flags);
+                           data.allocation.flags, true);
         if (IS_ERR(handle))
             return PTR_ERR(handle);
-
+        pass_to_user(handle);
         data.allocation.handle = handle->id;

         cleanup_handle = handle;
@@ -1335,7 +1675,7 @@
             mutex_unlock(&client->lock);
             return PTR_ERR(handle);
         }
-        ion_free_nolock(client, handle);
+        user_ion_free_nolock(client, handle);
         ion_handle_put_nolock(handle);
         mutex_unlock(&client->lock);
         break;
@@ -1362,11 +1702,18 @@
     {
         struct ion_handle *handle;

-        handle = ion_import_dma_buf(client, data.fd.fd);
-        if (IS_ERR(handle))
+        mutex_lock(&client->lock);
+        handle = ion_import_dma_buf_nolock(client, data.fd.fd);
+        if (IS_ERR(handle)) {
             ret = PTR_ERR(handle);
-        else
-            data.handle.handle = handle->id;
+        } else {
+            handle = pass_to_user(handle);
+            if (IS_ERR(handle))
+                ret = PTR_ERR(handle);
+            else
+                data.handle.handle = handle->id;
+        }
+        mutex_unlock(&client->lock);
         break;
     }
     case ION_IOC_SYNC:
@@ -1382,17 +1729,32 @@
                                    data.custom.arg);
         break;
     }
+    case ION_IOC_CLEAN_CACHES:
+        return client->dev->custom_ioctl(client,
+                                         ION_IOC_CLEAN_CACHES, arg);
+    case ION_IOC_INV_CACHES:
+        return client->dev->custom_ioctl(client,
+                                         ION_IOC_INV_CACHES, arg);
+    case ION_IOC_CLEAN_INV_CACHES:
+        return client->dev->custom_ioctl(client,
+                                         ION_IOC_CLEAN_INV_CACHES, arg);
     default:
         return -ENOTTY;
     }

     if (dir & _IOC_READ) {
         if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
-            if (cleanup_handle)
-                ion_free(client, cleanup_handle);
+            if (cleanup_handle) {
+                mutex_lock(&client->lock);
+                user_ion_free_nolock(client, cleanup_handle);
+                ion_handle_put_nolock(cleanup_handle);
+                mutex_unlock(&client->lock);
+            }
             return -EFAULT;
         }
     }
+
+    if (cleanup_handle)
+        ion_handle_put(cleanup_handle);
     return ret;
 }
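In the ION_IOC_ALLOC path above, __ion_alloc(..., true) takes an extra kref (grab_handle) so the handle stays pinned while pass_to_user() converts the allocation's original reference into a user reference; if the later copy_to_user() fails, the cleanup block drops the user reference under client->lock, and the final ion_handle_put() releases the extra kref on every path. Seen from userspace, the result is the usual two-step ABI; a minimal sketch, assuming a local copy of the staging uapi header:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"    /* copy of drivers/staging/android/uapi/ion.h */

int main(void)
{
    struct ion_allocation_data alloc = {
        .len = 4096,
        .align = 4096,
        .heap_id_mask = ~0u,    /* any heap */
        .flags = 0,
    };
    struct ion_handle_data free_data;
    int fd = open("/dev/ion", O_RDONLY);

    if (fd < 0 || ioctl(fd, ION_IOC_ALLOC, &alloc) < 0) {
        perror("ION_IOC_ALLOC");
        return 1;
    }

    free_data.handle = alloc.handle;    /* drop the user reference */
    ioctl(fd, ION_IOC_FREE, &free_data);
    close(fd);
    return 0;
}
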
@@ -1400,7 +1762,6 @@
 {
     struct ion_client *client = file->private_data;

-    pr_debug("%s: %d\n", __func__, __LINE__);
     ion_client_destroy(client);
     return 0;
 }
@@ -1412,7 +1773,6 @@
     struct ion_client *client;
     char debug_name[64];

-    pr_debug("%s: %d\n", __func__, __LINE__);
     snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
     client = ion_client_create(dev, debug_name);
     if (IS_ERR(client))
@@ -1448,6 +1808,106 @@
     return size;
 }

+/**
+ * Create a mem_map of the heap.
+ * @param s seq_file to log error message to.
+ * @param heap The heap to create mem_map for.
+ * @param mem_map The mem map to be created.
+ */
+void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
+                              struct list_head *mem_map)
+{
+    struct ion_device *dev = heap->dev;
+    struct rb_node *cnode;
+    size_t size;
+    struct ion_client *client;
+
+    if (!heap->ops->phys)
+        return;
+
+    down_read(&dev->lock);
+    for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
+        struct rb_node *hnode;
+        client = rb_entry(cnode, struct ion_client, node);
+
+        mutex_lock(&client->lock);
+        for (hnode = rb_first(&client->handles);
+             hnode;
+             hnode = rb_next(hnode)) {
+            struct ion_handle *handle = rb_entry(
+                hnode, struct ion_handle, node);
+            if (handle->buffer->heap == heap) {
+                struct mem_map_data *data =
+                    kzalloc(sizeof(*data), GFP_KERNEL);
+                if (!data)
+                    goto inner_error;
+                heap->ops->phys(heap, handle->buffer,
+                                &(data->addr), &size);
+                data->size = (unsigned long) size;
+                data->addr_end = data->addr + data->size - 1;
+                data->client_name = kstrdup(client->name,
+                                            GFP_KERNEL);
+                if (!data->client_name) {
+                    kfree(data);
+                    goto inner_error;
+                }
+                list_add(&data->node, mem_map);
+            }
+        }
+        mutex_unlock(&client->lock);
+    }
+    up_read(&dev->lock);
+    return;
+
+inner_error:
+    seq_puts(s,
+             "ERROR: out of memory. Part of memory map will not be logged\n");
+    mutex_unlock(&client->lock);
+    up_read(&dev->lock);
+}
+
+/**
+ * Free the memory allocated by ion_debug_mem_map_create
+ * @param mem_map The mem map to free.
+ */
+static void ion_debug_mem_map_destroy(struct list_head *mem_map)
+{
+    if (mem_map) {
+        struct mem_map_data *data, *tmp;
+        list_for_each_entry_safe(data, tmp, mem_map, node) {
+            list_del(&data->node);
+            kfree(data->client_name);
+            kfree(data);
+        }
+    }
+}
+
+static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+    struct mem_map_data *d1, *d2;
+    d1 = list_entry(a, struct mem_map_data, node);
+    d2 = list_entry(b, struct mem_map_data, node);
+    if (d1->addr == d2->addr)
+        return d1->size - d2->size;
+    return d1->addr - d2->addr;
+}
+
+/**
+ * Print heap debug information.
+ * @param s seq_file to log message to.
+ * @param heap pointer to heap that we will print debug information for.
+ */
+static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
+{
+    if (heap->ops->print_debug) {
+        struct list_head mem_map = LIST_HEAD_INIT(mem_map);
+        ion_debug_mem_map_create(s, heap, &mem_map);
+        list_sort(NULL, &mem_map, mem_map_cmp);
+        heap->ops->print_debug(heap, s, &mem_map);
+        ion_debug_mem_map_destroy(&mem_map);
+    }
+}
+
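mem_map_cmp() is the comparator handed to list_sort() in ion_heap_print_debug(). One caveat: returning the raw difference of two addresses or sizes can truncate or change sign when narrowed to int on 64-bit targets, so a three-way comparison is the safer shape. A sketch under that assumption (struct mem_map_data comes from this tree's ion_priv.h):

#include <linux/list.h>
#include <linux/list_sort.h>

static int mem_map_cmp_3way(void *priv, struct list_head *a,
                            struct list_head *b)
{
    struct mem_map_data *d1 = list_entry(a, struct mem_map_data, node);
    struct mem_map_data *d2 = list_entry(b, struct mem_map_data, node);

    if (d1->addr != d2->addr)
        return d1->addr < d2->addr ? -1 : 1;
    if (d1->size != d2->size)
        return d1->size < d2->size ? -1 : 1;
    return 0;
}
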
 static int ion_debug_heap_show(struct seq_file *s, void *unused)
 {
     struct ion_heap *heap = s->private;
@@ -1459,6 +1919,7 @@
     seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
     seq_puts(s, "----------------------------------------------------\n");

+    mutex_lock(&debugfs_mutex);
     for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
         struct ion_client *client = rb_entry(n, struct ion_client,
                                              node);
@@ -1477,6 +1938,8 @@
                        client->pid, size);
         }
     }
+    mutex_unlock(&debugfs_mutex);
+
     seq_puts(s, "----------------------------------------------------\n");
     seq_puts(s, "orphaned allocations (info is from last known client):\n");
     mutex_lock(&dev->buffer_lock);
@@ -1507,6 +1970,7 @@

     if (heap->debug_show)
         heap->debug_show(heap, s, unused);
+    ion_heap_print_debug(s, heap);

     return 0;
 }
@@ -1522,6 +1986,31 @@
     .release = single_release,
 };

+void show_ion_usage(struct ion_device *dev)
+{
+    struct ion_heap *heap;
+
+    if (!down_read_trylock(&dev->lock)) {
+        pr_err("Ion output would deadlock, can't print debug information\n");
+        return;
+    }
+
+    pr_info("%16.s %16.s %16.s\n", "Heap name", "Total heap size",
+            "Total orphaned size");
+    pr_info("---------------------------------\n");
+    plist_for_each_entry(heap, &dev->heaps, node) {
+        pr_info("%16.s 0x%16.lx 0x%16.lx\n",
+                heap->name, atomic_long_read(&heap->total_allocated),
+                atomic_long_read(&heap->total_allocated) -
+                atomic_long_read(&heap->total_handles));
+        if (heap->debug_show)
+            heap->debug_show(heap, NULL, 0);
+
+    }
+    up_read(&dev->lock);
+}
+
+#ifdef DEBUG_HEAP_SHRINKER
 static int debug_shrink_set(void *data, u64 val)
 {
     struct ion_heap *heap = data;
@@ -1556,6 +2045,7 @@

 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                         debug_shrink_set, "%llu\n");
+#endif

 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
@@ -1595,6 +2085,7 @@
             path, heap->name);
     }

+#ifdef DEBUG_HEAP_SHRINKER
     if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
         char debug_name[64];

@@ -1610,11 +2101,36 @@
                 path, debug_name);
         }
     }
+#endif

     up_write(&dev->lock);
 }
 EXPORT_SYMBOL(ion_device_add_heap);

+int ion_walk_heaps(struct ion_client *client, int heap_id,
+                   enum ion_heap_type type, void *data,
+                   int (*f)(struct ion_heap *heap, void *data))
+{
+    int ret_val = 0;
+    struct ion_heap *heap;
+    struct ion_device *dev = client->dev;
+    /*
+     * traverse the list of heaps available in this system
+     * and find the heap that is specified.
+     */
+    down_write(&dev->lock);
+    plist_for_each_entry(heap, &dev->heaps, node) {
+        if (ION_HEAP(heap->id) != heap_id ||
+            type != heap->type)
+            continue;
+        ret_val = f(heap, data);
+        break;
+    }
+    up_write(&dev->lock);
+    return ret_val;
+}
+EXPORT_SYMBOL(ion_walk_heaps);
+
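ion_walk_heaps() runs the callback against the single heap whose mask bit (ION_HEAP(heap->id)) and type match, with dev->lock held for write across the call. A hypothetical caller summing the per-heap counter this patch maintains; the helper names here are illustrative:

#include <linux/atomic.h>

/* callback: accumulate bytes outstanding in the matched heap */
static int add_heap_usage(struct ion_heap *heap, void *data)
{
    unsigned long *total = data;

    *total += atomic_long_read(&heap->total_allocated);
    return 0;
}

/* heap_id is matched against ION_HEAP(heap->id), i.e. a mask bit */
static unsigned long heap_usage(struct ion_client *client, int heap_id,
                                enum ion_heap_type type)
{
    unsigned long total = 0;

    ion_walk_heaps(client, heap_id, type, &total, add_heap_usage);
    return total;
}
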
 struct ion_device *ion_device_create(long (*custom_ioctl)
                                      (struct ion_client *client,
                                       unsigned int cmd,
@@ -1661,6 +2177,8 @@
     init_rwsem(&idev->lock);
     plist_head_init(&idev->heaps);
     idev->clients = RB_ROOT;
+    ion_root_client = &idev->clients;
+    mutex_init(&debugfs_mutex);
     return idev;
 }
 EXPORT_SYMBOL(ion_device_create);
@@ -1698,13 +2216,28 @@
         int ret = memblock_reserve(data->heaps[i].base,
                                    data->heaps[i].size);
         if (ret)
-            pr_err("memblock reserve of %zx@%lx failed\n",
+            pr_err("memblock reserve of %zx@%pa failed\n",
                    data->heaps[i].size,
-                   data->heaps[i].base);
+                   &data->heaps[i].base);
         }
-        pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+        pr_info("%s: %s reserved base %pa size %zu\n", __func__,
                 data->heaps[i].name,
-                data->heaps[i].base,
+                &data->heaps[i].base,
                 data->heaps[i].size);
     }
 }
+
+void lock_client(struct ion_client *client)
+{
+    mutex_lock(&client->lock);
+}
+
+void unlock_client(struct ion_client *client)
+{
+    mutex_unlock(&client->lock);
+}
+
+struct ion_buffer *get_buffer(struct ion_handle *handle)
+{
+    return handle->buffer;
+}
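lock_client(), unlock_client() and get_buffer() give sibling compilation units in this tree (presumably the msm-specific ion files) a way to take client->lock and reach handle->buffer without ion.c exporting its locking discipline wholesale. A hypothetical out-of-file user, assuming struct ion_buffer is visible via ion_priv.h:

static size_t handle_buffer_size(struct ion_client *client,
                                 struct ion_handle *handle)
{
    struct ion_buffer *buffer;
    size_t size;

    lock_client(client);
    buffer = get_buffer(handle);
    size = buffer->size;
    unlock_client(client);

    return size;
}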