/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/msm_ion.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <trace/events/kmem.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	vendor-specific ioctl handler
 * @clients:		an rb tree of all the clients created from userspace
 * @debug_root:		root debugfs dentry for ion
 * @heaps_debug_root:	debugfs directory for the heaps
 * @clients_debug_root:	debugfs directory for the clients
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 * @pid:		pid of the client's group leader, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @user_ref_count:	number of references held by userspace
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	unsigned int user_ref_count;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};
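/*
 * Illustrative in-kernel usage of the client API exported by this file
 * (a sketch, not driver code; "idev" stands for the ion_device created
 * by platform code, and the heap id/flags are example values):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, SZ_1M, SZ_4K,
 *			   ION_HEAP(ION_SYSTEM_HEAP_ID), ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);
 *	...					// CPU access through vaddr
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */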
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/*
 * buffer->pages[] entries borrow bit 0 of the struct page pointer as a
 * per-page dirty flag; the pointers are sufficiently aligned that the
 * low bit is otherwise always clear.
 */
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
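/*
 * Tag round-trip of the helpers above (illustrative):
 *
 *	ion_buffer_page_dirty(&buffer->pages[i]);	// set bit 0
 *	if (ion_buffer_page_is_dirty(buffer->pages[i]))
 *		page = ion_buffer_page(buffer->pages[i]);  // untagged pointer
 */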
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align, flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;
	INIT_LIST_HEAD(&buffer->vmas);

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}

	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	atomic_long_add(len, &heap->total_allocated);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer,
						 ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	msm_dma_buf_freed(buffer);

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
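/*
 * Buffer lifetime note: the final ion_buffer_put() runs
 * _ion_buffer_destroy(), which unlinks the buffer from dev->buffers and
 * then either frees it right away or, for ION_HEAP_FLAG_DEFER_FREE
 * heaps, hands it to the heap freelist so a background thread can free
 * it later.
 */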
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	if (buffer->handle_count == 0)
		atomic_long_add(buffer->size, &buffer->heap->total_handles);

	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
		atomic_long_sub(buffer->size, &buffer->heap->total_handles);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle,
						 ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(
		struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

/* Must hold the client lock */
static void user_ion_handle_get(struct ion_handle *handle)
{
	if (handle->user_ref_count++ == 0)
		kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *user_ion_handle_get_check_overflow(
		struct ion_handle *handle)
{
	if (handle->user_ref_count + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	user_ion_handle_get(handle);
	return handle;
}

/*
 * Passes a kref to the user ref count.
 * We know we're holding a kref to the object before and
 * after this call, so no need to reverify handle.
 * Caller must hold the client lock, except for ION_IOC_ALLOC.
 */
static struct ion_handle *pass_to_user(struct ion_handle *handle)
{
	struct ion_handle *ret;

	ret = user_ion_handle_get_check_overflow(handle);
	ion_handle_put_nolock(handle);
	return ret;
}

/* Must hold the client lock */
static int user_ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret = 0;

	if (--handle->user_ref_count == 0)
		ret = ion_handle_put_nolock(handle);

	return ret;
}
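/*
 * Reference-count summary for the helpers above: the kref tracks all
 * in-kernel references to a handle, while user_ref_count tracks how many
 * userspace operations (ION_IOC_ALLOC/ION_IOC_IMPORT vs. ION_IOC_FREE)
 * still reference it.  The first user reference pins exactly one kref --
 * pass_to_user() hands the caller's kref over to the user count -- and
 * the last user_ion_handle_put_nolock() releases it.
 */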
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle,
						    node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

bool ion_handle_validate(struct ion_client *client,
			 struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client,
			  struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
static struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
				      size_t align, unsigned int heap_id_mask,
				      unsigned int flags, bool grab_handle)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance.  In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n",
		 __func__, len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
					     heap_id_mask, flags);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
					   heap_id_mask, flags);
		if (!IS_ERR(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name,
						len, heap_id_mask, flags,
						PTR_ERR(buffer));
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left;
			int ret_value;

			len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
			ret_value = snprintf(&dbg_str[dbg_str_idx],
					     len_left, "%s ", heap->name);

			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
			 len, align, dbg_str, client->name);
		return ERR_CAST(buffer);
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	if (grab_handle)
		ion_handle_get(handle);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
}
EXPORT_SYMBOL(ion_alloc);

void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

static void user_ion_free_nolock(struct ion_client *client,
				 struct ion_handle *handle)
{
	bool valid_handle;

	WARN_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	if (handle->user_ref_count == 0) {
		WARN(1, "%s: User does not have access!\n", __func__);
		return;
	}
	user_ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
static int __ion_phys(struct ion_client *client, struct ion_handle *handle,
		      ion_phys_addr_t *addr, size_t *len, bool lock_client)
{
	struct ion_buffer *buffer;
	int ret;

	if (lock_client)
		mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		if (lock_client)
			mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		if (lock_client)
			mutex_unlock(&client->lock);
		return -ENODEV;
	}
	if (lock_client)
		mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	return __ion_phys(client, handle, addr, len, true);
}
EXPORT_SYMBOL(ion_phys);

int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
		    ion_phys_addr_t *addr, size_t *len)
{
	return __ion_phys(client, handle, addr, len, false);
}
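/*
 * Illustrative only: ion_phys() is meaningful for physically contiguous
 * heaps (carveout, CMA, ...) that implement ops->phys:
 *
 *	ion_phys_addr_t pa;
 *	size_t size;
 *
 *	if (!ion_phys(client, handle, &pa, &size))
 *		pr_debug("buffer at %pa, %zu bytes\n", &pa, size);
 */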
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;

static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%pK dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
		   "heap_name", "size_in_bytes", "handle refcount",
		   "buffer");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		seq_printf(s, "%16.16s: %16zx : %16d : %12pK\n",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	/* e.g. repeated "camera" clients become camera-0, camera-1, ... */
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(GFP_KERNEL, "%s-%d", name,
					 client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			 unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			size_t *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

/**
 * ion_sg_table - get an sg_table for the buffer
 *
 * NOTE: most likely you should NOT be using this API.
 * You should be using Ion as a DMA Buf exporter and using
 * the sg_table returned by dma_buf_map_attachment.
 */
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
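/*
 * Preferred alternative (illustrative sketch, error handling omitted):
 * treat ion purely as a dma-buf exporter and map the attachment instead
 * of calling ion_sg_table():
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *att = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(att, DMA_TO_DEVICE);
 *	...					// program the device
 *	dma_buf_unmap_attachment(att, sgt, DMA_TO_DEVICE);
 *	dma_buf_detach(dmabuf, att);
 *	dma_buf_put(dmabuf);
 */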
struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					     size_t chunk_size,
					     size_t total_size)
{
	struct sg_table *table;
	int i, n_chunks, ret;
	struct scatterlist *sg;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
	pr_debug("creating sg_table with %d chunks\n", n_chunks);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		dma_addr_t addr = buffer_base + i * chunk_size;

		sg_dma_address(sg) = addr;
		sg->length = chunk_size;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

static struct sg_table *ion_dupe_sg_table(struct sg_table *orig_table)
{
	int ret, i;
	struct scatterlist *sg, *sg_orig;
	struct sg_table *table;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return NULL;

	ret = sg_alloc_table(table, orig_table->nents, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return NULL;
	}

	sg_orig = orig_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(sg, sg_orig, sizeof(*sg));
		sg_orig = sg_next(sg_orig);
	}
	return table;
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;
	struct sg_table *table;

	table = ion_dupe_sg_table(buffer->sg_table);
	if (!table)
		return NULL;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	sg_free_table(table);
	kfree(table);
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	WARN_ONCE(!dev, "A device is required for dma_sync\n");

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		break;
	}
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
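/*
 * How the fault path above fits together (summary): for buffers where
 * ion_buffer_fault_user_mappings() is true, userspace mappings are
 * populated lazily by ion_vm_fault(), which also tags the faulted page
 * dirty in buffer->pages[].  ion_buffer_sync_for_device() then syncs
 * only the dirty pages and zap_page_range()s every registered vma, so
 * the next CPU access faults again and re-marks its page.
 */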
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		vma->vm_flags |= VM_MIXEDMAP;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
					   struct ion_handle *handle,
					   bool lock_client)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	if (lock_client)
		mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		if (lock_client)
			mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	if (lock_client)
		mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	return __ion_share_dma_buf(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf);

static int __ion_share_dma_buf_fd(struct ion_client *client,
				  struct ion_handle *handle, bool lock_client)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = __ion_share_dma_buf(client, handle, lock_client);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

int ion_share_dma_buf_fd(struct ion_client *client,
			 struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, true);
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

bool ion_dma_buf_is_secure(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	enum ion_heap_type type;

	/* Return false if we didn't create the buffer */
	if (!dmabuf || dmabuf->ops != &dma_buf_ops)
		return false;

	buffer = dmabuf->priv;

	if (!buffer || !buffer->heap)
		return false;

	type = buffer->heap->type;

	return (type == (enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA ||
		type == (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE) ?
		true : false;
}
EXPORT_SYMBOL(ion_dma_buf_is_secure);
static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
					struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, false);
}

static struct ion_handle *__ion_import_dma_buf(struct ion_client *client,
					       int fd, bool lock_client)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	if (lock_client)
		mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		handle = ion_handle_get_check_overflow(handle);
		if (lock_client)
			mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		if (lock_client)
			mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	if (lock_client)
		mutex_unlock(&client->lock);
	if (ret) {
		if (lock_client)
			ion_handle_put(handle);
		else
			ion_handle_put_nolock(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	return __ion_import_dma_buf(client, fd, true);
}
EXPORT_SYMBOL(ion_import_dma_buf);

struct ion_handle *ion_import_dma_buf_nolock(struct ion_client *client,
					     int fd)
{
	return __ion_import_dma_buf(client, fd, false);
}

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_heap *heap;
	struct device *dev;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;
	heap = buffer->heap;
	dev = heap->priv;

	if (get_secure_vmid(buffer->flags) > 0) {
		pr_err("%s: cannot sync a secure dmabuf\n", __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	dma_sync_sg_for_device(dev, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}
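/*
 * Userspace view of the ioctls handled below (illustrative sketch using
 * the legacy ion uapi; heap id and sizes are example values, error
 * handling omitted):
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 1 << 20,
 *		.align = 4096,
 *		.heap_id_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	// share.fd is a dma-buf fd
 *	ptr = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 */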
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;
	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = __ion_alloc(client, data.allocation.len,
				     data.allocation.align,
				     data.allocation.heap_id_mask,
				     data.allocation.flags, true);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		pass_to_user(handle);
		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client,
						     data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		user_ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client,
						     data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_import_dma_buf_nolock(client, data.fd.fd);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
		} else {
			handle = pass_to_user(handle);
			if (IS_ERR(handle))
				ret = PTR_ERR(handle);
			else
				data.handle.handle = handle->id;
		}
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	case ION_IOC_CLEAN_CACHES:
		return client->dev->custom_ioctl(client,
						 ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES:
		return client->dev->custom_ioctl(client,
						 ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES:
		return client->dev->custom_ioctl(client,
						 ION_IOC_CLEAN_INV_CACHES,
						 arg);
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle) {
				mutex_lock(&client->lock);
				user_ion_free_nolock(client, cleanup_handle);
				ion_handle_put_nolock(cleanup_handle);
				mutex_unlock(&client->lock);
			}
			return -EFAULT;
		}
	}
	if (cleanup_handle)
		ion_handle_put(cleanup_handle);
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device,
					      dev);
	struct ion_client *client;
	char debug_name[64];

	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
/**
 * Create a mem_map of the heap.
 * @param s seq_file to log error message to.
 * @param heap The heap to create mem_map for.
 * @param mem_map The mem map to be created.
 */
void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
			      struct list_head *mem_map)
{
	struct ion_device *dev = heap->dev;
	struct rb_node *cnode;
	size_t size;
	struct ion_client *client;

	if (!heap->ops->phys)
		return;

	down_read(&dev->lock);
	for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
		struct rb_node *hnode;

		client = rb_entry(cnode, struct ion_client, node);

		mutex_lock(&client->lock);
		for (hnode = rb_first(&client->handles);
		     hnode;
		     hnode = rb_next(hnode)) {
			struct ion_handle *handle = rb_entry(
				hnode, struct ion_handle, node);
			if (handle->buffer->heap == heap) {
				struct mem_map_data *data =
					kzalloc(sizeof(*data), GFP_KERNEL);
				if (!data)
					goto inner_error;
				heap->ops->phys(heap, handle->buffer,
						&data->addr, &size);
				data->size = (unsigned long)size;
				data->addr_end = data->addr + data->size - 1;
				data->client_name = kstrdup(client->name,
							    GFP_KERNEL);
				if (!data->client_name) {
					kfree(data);
					goto inner_error;
				}
				list_add(&data->node, mem_map);
			}
		}
		mutex_unlock(&client->lock);
	}
	up_read(&dev->lock);
	return;

inner_error:
	seq_puts(s,
		 "ERROR: out of memory. Part of memory map will not be logged\n");
	mutex_unlock(&client->lock);
	up_read(&dev->lock);
}

/**
 * Free the memory allocated by ion_debug_mem_map_create
 * @param mem_map The mem map to free.
 */
static void ion_debug_mem_map_destroy(struct list_head *mem_map)
{
	if (mem_map) {
		struct mem_map_data *data, *tmp;

		list_for_each_entry_safe(data, tmp, mem_map, node) {
			list_del(&data->node);
			kfree(data->client_name);
			kfree(data);
		}
	}
}

static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct mem_map_data *d1, *d2;

	d1 = list_entry(a, struct mem_map_data, node);
	d2 = list_entry(b, struct mem_map_data, node);
	if (d1->addr == d2->addr) {
		if (d1->size == d2->size)
			return 0;
		/* compare explicitly to avoid sign truncation on 64-bit */
		return d1->size < d2->size ? -1 : 1;
	}
	return d1->addr < d2->addr ? -1 : 1;
}

/**
 * Print heap debug information.
 * @param s seq_file to log message to.
 * @param heap pointer to heap that we will print debug information for.
 */
static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
{
	if (heap->ops->print_debug) {
		struct list_head mem_map = LIST_HEAD_INIT(mem_map);

		ion_debug_mem_map_create(s, heap, &mem_map);
		list_sort(NULL, &mem_map, mem_map_cmp);
		heap->ops->print_debug(heap, s, &mem_map);
		ion_debug_mem_map_destroy(&mem_map);
	}
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	ion_heap_print_debug(s, heap);
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void show_ion_usage(struct ion_device *dev)
{
	struct ion_heap *heap;

	if (!down_read_trylock(&dev->lock)) {
		pr_err("Ion output would deadlock, can't print debug information\n");
		return;
	}

	pr_info("%16s %16s %16s\n", "Heap name", "Total heap size",
		"Total orphaned size");
	pr_info("---------------------------------\n");
	plist_for_each_entry(heap, &dev->heaps, node) {
		pr_info("%16s 0x%16lx 0x%16lx\n",
			heap->name, atomic_long_read(&heap->total_allocated),
			atomic_long_read(&heap->total_allocated) -
			atomic_long_read(&heap->total_handles));
		if (heap->debug_show)
			heap->debug_show(heap, NULL, 0);
	}
	up_read(&dev->lock);
}

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif
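/*
 * With DEBUG_HEAP_SHRINKER defined, each shrinkable heap gets a debugfs
 * knob named <heap>_shrink (wired up in ion_device_add_heap() below;
 * paths and heap name here are examples):
 *
 *	# cat /sys/kernel/debug/ion/heaps/system_shrink    (freeable objects)
 *	# echo 0 > /sys/kernel/debug/ion/heaps/system_shrink
 *
 * Writing 0 is special-cased in debug_shrink_set() above: it counts the
 * freeable objects first and then asks the shrinker to scan that many,
 * i.e. it drains everything.
 */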
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(debug_name, 0644,
						 dev->heaps_debug_root,
						 heap, &debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}
#endif
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
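/*
 * Illustrative bring-up (lives in vendor platform code, not in this
 * file; "my_custom_ioctl" is a placeholder for the vendor ioctl hook):
 *
 *	idev = ion_device_create(my_custom_ioctl);
 *	for (i = 0; i < pdata->nr; i++)
 *		ion_device_add_heap(idev, heaps[i]);
 */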
int ion_walk_heaps(struct ion_client *client, int heap_id,
		   enum ion_heap_type type, void *data,
		   int (*f)(struct ion_heap *heap, void *data))
{
	int ret_val = 0;
	struct ion_heap *heap;
	struct ion_device *dev = client->dev;
	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
	down_write(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		if (ION_HEAP(heap->id) != heap_id || type != heap->type)
			continue;
		ret_val = f(heap, data);
		break;
	}
	up_write(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL(ion_walk_heaps);

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps",
						    idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%pa failed\n",
				       data->heaps[i].size,
				       &data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %pa size %zu\n", __func__,
			data->heaps[i].name,
			&data->heaps[i].base,
			data->heaps[i].size);
	}
}

void lock_client(struct ion_client *client)
{
	mutex_lock(&client->lock);
}

void unlock_client(struct ion_client *client)
{
	mutex_unlock(&client->lock);
}

struct ion_buffer *get_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}
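/*
 * The accessors above exist so that other ion compilation units (e.g.
 * the msm-specific cache maintenance code) can take the client lock and
 * reach a handle's buffer without depending on the struct layouts in
 * this file, roughly:
 *
 *	lock_client(client);
 *	buffer = get_buffer(handle);
 *	...
 *	unlock_client(client);
 */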