--- zzzz-none-000/linux-4.4.271/drivers/staging/android/ion/ion_priv.h	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/drivers/staging/android/ion/ion_priv.h	2023-04-19 10:22:29.000000000 +0000
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion_priv.h
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,9 +24,16 @@
 #include
 #include
 #include
+#include
+
+#include "msm_ion_priv.h"
 #include
 #include
 #include
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+#include
+#endif
+#include
 
 #include "ion.h"
 
@@ -46,9 +54,12 @@
  *			an ion_phys_addr_t (and someday a phys_addr_t)
  * @lock:		protects the buffers cnt fields
  * @kmap_cnt:		number of times the buffer is mapped to the kernel
- * @vaddr:		the kernel mapping if kmap_cnt is not zero
- * @dmap_cnt:		number of times the buffer is mapped for dma
- * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
+ * @vaddr:		the kenrel mapping if kmap_cnt is not zero
+ * @sg_table:		the sg table for the buffer. Note that if you need
+ *			an sg_table for this buffer, you should likely be
+ *			using Ion as a DMA Buf exporter and using
+ *			dma_buf_map_attachment rather than trying to use this
+ *			field directly.
  * @pages:		flat array of pages in the buffer -- used by fault
  *			handler and only valid for buffers that are faulted in
  * @vmas:		list of vma's mapping this buffer
@@ -76,7 +87,6 @@
 	struct mutex lock;
 	int kmap_cnt;
 	void *vaddr;
-	int dmap_cnt;
 	struct sg_table *sg_table;
 	struct page **pages;
 	struct list_head vmas;
@@ -90,7 +100,11 @@
 /**
  * struct ion_heap_ops - ops to operate on a given heap
  * @allocate:		allocate memory
- * @free:		free memory
+ * @free:		free memory. Will be called with
+ *			ION_PRIV_FLAG_SHRINKER_FREE set in buffer flags when
+ *			called from a shrinker. In that case, the pages being
+ *			free'd must be truly free'd back to the system, not put
+ *			in a page pool or otherwise cached.
  * @phys		get physical address of a buffer (only define on
  *			physically contiguous heaps)
  * @map_dma		map the memory for dma to a scatterlist
  * @unmap_dma		unmap the memory for dma
@@ -98,6 +112,7 @@
  * @map_kernel		map memory to the kernel
  * @unmap_kernel	unmap memory to the kernel
  * @map_user		map memory to userspace
+ * @unmap_user		unmap memory to userspace
  *
  * allocate, phys, and map_user return 0 on success, -errno on error.
 * map_dma and map_kernel return pointer on success, ERR_PTR on
@@ -121,6 +136,9 @@
 	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
 			struct vm_area_struct *vma);
 	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
+	int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
+			   const struct list_head *mem_map);
 };
 
 /**
@@ -151,6 +169,7 @@
  *			MUST be unique
  * @name:		used for debugging
  * @shrinker:		a shrinker for the heap
+ * @priv:		private heap data
  * @free_list:		free list head if deferred free is used
  * @free_list_size	size of the deferred free list in bytes
  * @lock:		protects the free list
@@ -173,6 +192,7 @@
 	unsigned int id;
 	const char *name;
 	struct shrinker shrinker;
+	void *priv;
 	struct list_head free_list;
 	size_t free_list_size;
 	spinlock_t free_lock;
@@ -180,6 +200,8 @@
 	struct task_struct *task;
 
 	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+	atomic_long_t total_allocated;
+	atomic_long_t total_handles;
 };
 
 /**
@@ -223,6 +245,12 @@
  */
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
 
+struct pages_mem {
+	struct page **pages;
+	u32 size;
+	void (*free_fn) (const void *);
+};
+
 /**
  * some helpers for common operations on buffers using the sg_table
  * and vaddr fields
@@ -234,6 +262,32 @@
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
+				      int order);
+struct ion_heap *get_ion_heap(int heap_id);
+int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *,
+			       size_t size);
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages);
+int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
+void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem);
+
+/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
 /**
  * ion_heap_init_shrinker
  * @heap:		the heap
  *
  * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
  * this function will be called to setup a shrinker to shrink the freelists
  * and call the heap's shrink op.
  */
@@ -276,7 +330,7 @@
 size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
 
 /**
- * ion_heap_freelist_shrink - drain the deferred free
+ * ion_heap_freelist_drain_from_shrinker - drain the deferred free
  *				list, skipping any heap-specific
  *				pooling or caching mechanisms
  *
@@ -292,10 +346,10 @@
  * page pools or otherwise cache the pages. Everything must be
  * genuinely free'd back to the system. If you're free'ing from a
  * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
+ * the heap.ops.free callback honoring the
+ * ION_PRIV_FLAG_SHRINKER_FREE flag.
  */
-size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap,
 					size_t size);
 
 /**
@@ -324,8 +378,16 @@
 struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
 void ion_chunk_heap_destroy(struct ion_heap *);
 
+#ifdef CONFIG_CMA
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
 void ion_cma_heap_destroy(struct ion_heap *);
+#else
+static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *h)
+{
+	return NULL;
+}
+static inline void ion_cma_heap_destroy(struct ion_heap *h) {}
+#endif
 
 /**
  * kernel api to allocate/free from carveout -- used when carveout is
@@ -372,15 +434,51 @@
 	struct list_head high_items;
 	struct list_head low_items;
 	struct mutex mutex;
+	struct device *dev;
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct plist_node list;
 };
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
+					   unsigned int order);
 void ion_page_pool_destroy(struct ion_page_pool *);
-struct page *ion_page_pool_alloc(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *, bool *from_pool);
+void *ion_page_pool_alloc_pool_only(struct ion_page_pool *);
 void ion_page_pool_free(struct ion_page_pool *, struct page *);
+void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
+int ion_page_pool_total(struct ion_page_pool *pool, bool high);
+size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap, int vmid);
+
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+static inline void ion_page_pool_alloc_set_cache_policy
+				(struct ion_page_pool *pool,
+				 struct page *page){
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wc((unsigned long)va, 1 << pool->order);
+}
+
+static inline void ion_page_pool_free_set_cache_policy
+				(struct ion_page_pool *pool,
+				 struct page *page){
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wb((unsigned long)va, 1 << pool->order);
+
+}
+#else
+static inline void ion_page_pool_alloc_set_cache_policy
+				(struct ion_page_pool *pool,
+				 struct page *page){ }
+
+static inline void ion_page_pool_free_set_cache_policy
+				(struct ion_page_pool *pool,
+				 struct page *page){ }
+#endif
+
 /** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
  * @pool:		the pool
@@ -403,4 +501,38 @@
 void ion_pages_sync_for_device(struct device *dev, struct page *page,
 		size_t size, enum dma_data_direction dir);
 
+int ion_walk_heaps(struct ion_client *client, int heap_id,
+		   enum ion_heap_type type, void *data,
+		   int (*f)(struct ion_heap *heap, void *data));
+
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+					       int id);
+
+int ion_handle_put(struct ion_handle *handle);
+
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle);
+
+void lock_client(struct ion_client *client);
+
+void unlock_client(struct ion_client *client);
+
+struct ion_buffer *get_buffer(struct ion_handle *handle);
+
+/**
+ * This function is same as ion_free() except it won't use client->lock.
+ */
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * This function is same as ion_phys() except it won't use client->lock.
+ */
+int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
+		    ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * This function is same as ion_import_dma_buf() except it won't use
+ * client->lock.
+ */
+struct ion_handle *ion_import_dma_buf_nolock(struct ion_client *client, int fd);
+
 #endif /* _ION_PRIV_H */
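
The reworked @free documentation and the ion_heap_freelist_drain_from_shrinker()
comment describe the same contract: when a buffer is freed on behalf of a
shrinker, its pages must go back to the system rather than into a heap-private
page pool. A minimal sketch of a pooling heap's free callback that honours that
contract follows; struct example_heap and example_heap_free() are invented for
illustration, and the sketch assumes the buffer carries
ION_PRIV_FLAG_SHRINKER_FREE in its private_flags field, as in the mainline 4.4
ION core.

/* Sketch only -- example_heap and example_heap_free() are hypothetical. */
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "ion_priv.h"

struct example_heap {
	struct ion_heap heap;
	struct ion_page_pool *pool;
};

static void example_heap_free(struct ion_buffer *buffer)
{
	struct example_heap *eheap =
		container_of(buffer->heap, struct example_heap, heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);

		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
			/* Shrinker path: really release the page. */
			ion_page_pool_free_immediate(eheap->pool, page);
		else
			/* Normal path: let the pool cache it. */
			ion_page_pool_free(eheap->pool, page);
	}

	sg_free_table(table);
	kfree(table);
}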
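
ion_page_pool_alloc() now reports, through the bool *from_pool out-parameter,
whether the page was served from the pool or had to be taken straight from the
page allocator. The helper below is a hypothetical caller sketching how that
flag might be paired with ion_pages_sync_for_device(), which is declared in
this header; the assumption that pooled pages are already clean for the device
is mine, not something this patch states.

/* Sketch only -- example_alloc_pool_page() is hypothetical. */
#include <linux/dma-direction.h>
#include <linux/mm.h>
#include "ion_priv.h"

static struct page *example_alloc_pool_page(struct ion_page_pool *pool)
{
	bool from_pool = false;
	struct page *page;

	page = ion_page_pool_alloc(pool, &from_pool);
	if (!page)
		return NULL;

	/*
	 * A page that bypassed the pool may still be dirty in the CPU
	 * caches; clean it before it is handed to a device.
	 */
	if (!from_pool)
		ion_pages_sync_for_device(pool->dev, page,
					  PAGE_SIZE << pool->order,
					  DMA_BIDIRECTIONAL);

	return page;
}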
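
The exported lock_client()/unlock_client() pair and the *_nolock variants split
what used to be a single locked call into "take the client lock once, then do
several lookups under it". The sketch below shows one plausible pairing;
example_lookup_phys() is invented, and the assumptions that
ion_handle_get_by_id_nolock() returns an ERR_PTR on failure and that
ion_handle_put() manages the client lock internally are mine.

/* Sketch only -- example_lookup_phys() is hypothetical. */
#include <linux/err.h>
#include "ion_priv.h"

static int example_lookup_phys(struct ion_client *client, int handle_id,
			       ion_phys_addr_t *addr, size_t *len)
{
	struct ion_handle *handle;
	int ret;

	lock_client(client);
	handle = ion_handle_get_by_id_nolock(client, handle_id);
	if (IS_ERR(handle)) {
		unlock_client(client);
		return PTR_ERR(handle);
	}

	ret = ion_phys_nolock(client, handle, addr, len);
	unlock_client(client);

	/* Drop the reference taken by the lookup, outside the client lock. */
	ion_handle_put(handle);

	return ret;
}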