--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/ttm/ttm_execbuf_util.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/ttm/ttm_execbuf_util.c	2021-02-04 17:41:59.000000000 +0000
@@ -32,23 +32,13 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
+					      struct ttm_validate_buffer *entry)
 {
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
+	list_for_each_entry_continue_reverse(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		if (!entry->reserved)
-			continue;
-
-		if (entry->removed) {
-			ttm_bo_add_to_lru(bo);
-			entry->removed = false;
-		}
-		entry->reserved = false;
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		__ttm_bo_unreserve(bo);
 	}
 }
 
@@ -58,31 +48,14 @@
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		if (!entry->reserved)
-			continue;
+		unsigned put_count = ttm_bo_del_from_lru(bo);
 
-		if (!entry->removed) {
-			entry->put_count = ttm_bo_del_from_lru(bo);
-			entry->removed = true;
-		}
+		ttm_bo_list_ref_sub(bo, put_count, true);
 	}
 }
 
-static void ttm_eu_list_ref_sub(struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-
-		if (entry->put_count) {
-			ttm_bo_list_ref_sub(bo, entry->put_count, true);
-			entry->put_count = 0;
-		}
-	}
-}
-
-void ttm_eu_backoff_reservation(struct list_head *list)
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+				struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_bo_global *glob;
@@ -92,9 +65,18 @@
 
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
+
 	spin_lock(&glob->lru_lock);
-	ttm_eu_backoff_reservation_locked(list);
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		ttm_bo_add_to_lru(bo);
+		__ttm_bo_unreserve(bo);
+	}
 	spin_unlock(&glob->lru_lock);
+
+	if (ticket)
+		ww_acquire_fini(ticket);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
@@ -110,102 +92,95 @@
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list)
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+			   struct list_head *list, bool intr,
+			   struct list_head *dups)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
-	uint32_t val_seq;
 
 	if (list_empty(list))
 		return 0;
 
-	list_for_each_entry(entry, list, head) {
-		entry->reserved = false;
-		entry->put_count = 0;
-		entry->removed = false;
-	}
-
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-	spin_lock(&glob->lru_lock);
-	val_seq = entry->bo->bdev->val_seq++;
+	if (ticket)
+		ww_acquire_init(ticket, &reservation_ww_class);
 
-retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		/* already slowpath reserved? */
-		if (entry->reserved)
+		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
+				       ticket);
+		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			__ttm_bo_unreserve(bo);
+
+			ret = -EBUSY;
+
+		} else if (ret == -EALREADY && dups) {
+			struct ttm_validate_buffer *safe = entry;
+			entry = list_prev_entry(entry, head);
+			list_del(&safe->head);
+			list_add(&safe->head, dups);
 			continue;
+		}
 
-		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
-		switch (ret) {
-		case 0:
-			break;
-		case -EBUSY:
-			ttm_eu_del_from_lru_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ret = ttm_bo_reserve_nolru(bo, true, false,
-						   true, val_seq);
-			spin_lock(&glob->lru_lock);
+		if (!ret) {
+			if (!entry->shared)
+				continue;
+
+			ret = reservation_object_reserve_shared(bo->resv);
 			if (!ret)
-				break;
+				continue;
+		}
 
-			if (unlikely(ret != -EAGAIN))
-				goto err;
+		/* uh oh, we lost out, drop every reservation and try
+		 * to only reserve this buffer, then start over if
+		 * this succeeds.
+		 */
+		ttm_eu_backoff_reservation_reverse(list, entry);
+
+		if (ret == -EDEADLK && intr) {
+			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+							       ticket);
+		} else if (ret == -EDEADLK) {
+			ww_mutex_lock_slow(&bo->resv->lock, ticket);
+			ret = 0;
+		}
+
+		if (!ret && entry->shared)
+			ret = reservation_object_reserve_shared(bo->resv);
 
-			/* fallthrough */
-		case -EAGAIN:
-			ttm_eu_backoff_reservation_locked(list);
-
-			/*
-			 * temporarily increase sequence number every retry,
-			 * to prevent us from seeing our old reservation
-			 * sequence when someone else reserved the buffer,
-			 * but hasn't updated the seq_valid/seqno members yet.
-			 */
-			val_seq = entry->bo->bdev->val_seq++;
-
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
-			if (unlikely(ret != 0))
-				return ret;
-			spin_lock(&glob->lru_lock);
-			entry->reserved = true;
-			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-				ret = -EBUSY;
-				goto err;
+		if (unlikely(ret != 0)) {
+			if (ret == -EINTR)
+				ret = -ERESTARTSYS;
+			if (ticket) {
+				ww_acquire_done(ticket);
+				ww_acquire_fini(ticket);
 			}
-			goto retry;
-		default:
-			goto err;
+			return ret;
 		}
 
-		entry->reserved = true;
-		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ret = -EBUSY;
-			goto err;
-		}
+		/* move this item to the front of the list,
+		 * forces correct iteration of the loop without keeping track
		 */
+		list_del(&entry->head);
+		list_add(&entry->head, list);
 	}
 
+	if (ticket)
+		ww_acquire_done(ticket);
+	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
-	ttm_eu_list_ref_sub(list);
-
 	return 0;
-
-err:
-	ttm_eu_backoff_reservation_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ttm_eu_list_ref_sub(list);
-	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
-void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+				 struct list_head *list, struct fence *fence)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
@@ -222,21 +197,18 @@
 	glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	spin_lock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		entry->old_sync_obj = bo->sync_obj;
-		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		ttm_bo_unreserve_locked(bo);
-		entry->reserved = false;
+		if (entry->shared)
+			reservation_object_add_shared_fence(bo->resv, fence);
+		else
+			reservation_object_add_excl_fence(bo->resv, fence);
+		ttm_bo_add_to_lru(bo);
+		__ttm_bo_unreserve(bo);
 	}
-	spin_unlock(&bdev->fence_lock);
 	spin_unlock(&glob->lru_lock);
-
-	list_for_each_entry(entry, list, head) {
-		if (entry->old_sync_obj)
-			driver->sync_obj_unref(&entry->old_sync_obj);
-	}
+	if (ticket)
+		ww_acquire_fini(ticket);
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
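
Usage note (not part of the patch): the hunks above replace the old val_seq/atomic reservation scheme with ww_mutex tickets and per-BO reservation objects. The following is a minimal, hypothetical sketch of how a driver would drive the reworked entry points; the helper name, buffer object, fence, placeholder error handling and header selection are assumptions for this kernel generation, while the ttm_eu_* signatures and the ttm_validate_buffer fields (bo, shared, head) are taken from the patch itself.

#include <linux/list.h>
#include <linux/ww_mutex.h>
#include <linux/fence.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Hypothetical driver helper; only the ttm_eu_* calls mirror the patch. */
static int example_submit(struct ttm_buffer_object *bo, struct fence *fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	tv.bo = bo;
	tv.shared = true;	/* ask for a shared fence slot on bo->resv */
	list_add(&tv.head, &list);

	/* Interruptible reservation; no separate list for -EALREADY duplicates. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (ret)
		return ret;

	ret = 0;	/* placeholder for driver validation / command-stream setup */
	if (ret) {
		/* Failure path: re-add to LRU, unreserve, release the ticket. */
		ttm_eu_backoff_reservation(&ticket, &list);
		return ret;
	}

	/* Success path: publish the fence on every reservation object and unreserve. */
	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
	return 0;
}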