--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/ttm/ttm_page_alloc.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/ttm/ttm_page_alloc.c	2021-02-04 17:41:59.000000000 +0000
@@ -297,9 +297,12 @@
  *
  * @pool: to free the pages from
  * @free_all: If set to true will free all pages in pool
+ * @use_static: Safe to use static buffer
  **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+			      bool use_static)
 {
+	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
 	unsigned long irq_flags;
 	struct page *p;
 	struct page **pages_to_free;
@@ -309,8 +312,11 @@
 	if (NUM_PAGES_TO_ALLOC < nr_free)
 		npages_to_free = NUM_PAGES_TO_ALLOC;
 
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-			GFP_KERNEL);
+	if (use_static)
+		pages_to_free = static_buf;
+	else
+		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+					GFP_KERNEL);
 	if (!pages_to_free) {
 		pr_err("Failed to allocate memory for pool free operation\n");
 		return 0;
@@ -373,49 +379,63 @@
 	if (freed_pages)
 		ttm_pages_put(pages_to_free, freed_pages);
 out:
-	kfree(pages_to_free);
+	if (pages_to_free != static_buf)
+		kfree(pages_to_free);
 	return nr_free;
 }
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
-	unsigned i;
-	int total = 0;
-	for (i = 0; i < NUM_POOLS; ++i)
-		total += _manager->pools[i].npages;
-
-	return total;
-}
-
 /**
  * Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * This code is crying out for a shrinker per pool....
  */
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
-			      struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	static atomic_t start_pool = ATOMIC_INIT(0);
+	static DEFINE_MUTEX(lock);
+	static unsigned start_pool;
 	unsigned i;
-	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned pool_offset;
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
+	unsigned long freed = 0;
 
-	pool_offset = pool_offset % NUM_POOLS;
+	if (!mutex_trylock(&lock))
+		return SHRINK_STOP;
+	pool_offset = ++start_pool % NUM_POOLS;
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
 		if (shrink_pages == 0)
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		/* OK to use static buffer since global mutex is held. */
+		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
+		freed += nr_free - shrink_pages;
 	}
-	/* return estimated number of unused pages in pool */
-	return ttm_pool_get_num_unused_pages();
+	mutex_unlock(&lock);
+	return freed;
+}
+
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned i;
+	unsigned long count = 0;
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		count += _manager->pools[i].npages;
+
+	return count;
 }
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
@@ -694,7 +714,7 @@
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 	if (npages)
-		ttm_page_pool_free(pool, npages);
+		ttm_page_pool_free(pool, npages, false);
 }
 
 /*
@@ -778,7 +798,7 @@
 	return 0;
 }
 
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 		char *name)
 {
 	spin_lock_init(&pool->lock);
@@ -833,8 +853,9 @@
 	pr_info("Finalizing pool allocator\n");
 	ttm_pool_mm_shrink_fini(_manager);
 
+	/* OK to use static buffer since global mutex is no longer used. */
 	for (i = 0; i < NUM_POOLS; ++i)
-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
 
 	kobject_put(&_manager->kobj);
 	_manager = NULL;
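
The hunks above combine two upstream ideas: the shrinker is split into ->count_objects/->scan_objects callbacks (returning SHRINK_STOP when the global mutex cannot be taken), and ttm_page_pool_free() gains a use_static flag so the reclaim path reuses a static page-pointer buffer instead of calling kmalloc() while memory is already tight. The sketch below shows the same pattern in isolation under the post-3.12 shrinker API that this diff backports; it is a minimal illustration, not the TTM code, and every demo_* name (demo_pool_free, demo_shrink_count, demo_shrink_scan, demo_shrinker, DEMO_BATCH) is invented for the example.

/*
 * Minimal, self-contained sketch of the two techniques used above:
 * count/scan shrinker callbacks plus a static scratch buffer so the
 * reclaim path never allocates memory.  All demo_* names are invented
 * for illustration; this is not the TTM implementation.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DEMO_BATCH 64

static DEFINE_SPINLOCK(demo_lock);
static struct page *demo_pages[1024];
static unsigned long demo_npages;

/* Free up to @nr pooled pages; @use_static requires external exclusion. */
static unsigned long demo_pool_free(unsigned long nr, bool use_static)
{
	static struct page *static_buf[DEMO_BATCH];
	struct page **batch;
	unsigned long i, count = 0;

	if (use_static)
		batch = static_buf;	/* no kmalloc() under memory pressure */
	else
		batch = kmalloc_array(DEMO_BATCH, sizeof(*batch), GFP_KERNEL);
	if (!batch)
		return 0;

	spin_lock(&demo_lock);
	while (count < nr && count < DEMO_BATCH && demo_npages)
		batch[count++] = demo_pages[--demo_npages];
	spin_unlock(&demo_lock);

	for (i = 0; i < count; i++)
		__free_page(batch[i]);

	if (batch != static_buf)
		kfree(batch);
	return count;
}

/* Cheap estimate of how many objects a scan could free. */
static unsigned long demo_shrink_count(struct shrinker *s,
				       struct shrink_control *sc)
{
	return demo_npages;
}

/* Actual reclaim; serialized so the static buffer is safe to reuse. */
static unsigned long demo_shrink_scan(struct shrinker *s,
				      struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	unsigned long freed;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;	/* another thread is already shrinking */
	freed = demo_pool_free(sc->nr_to_scan, true);
	mutex_unlock(&lock);
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_shrink_count,
	.scan_objects	= demo_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};

Registering this with register_shrinker(&demo_shrinker) at init time and unregister_shrinker() on teardown corresponds to what ttm_pool_mm_shrink_init() and ttm_pool_mm_shrink_fini() do in the patched file; the mutex_trylock()/SHRINK_STOP pair is what makes the single static buffer safe, since only one reclaim thread can reach demo_pool_free(..., true) at a time.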