--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/radeon/radeon_object.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/radeon/radeon_object.c	2021-02-04 17:41:59.000000000 +0000
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
 #include "radeon.h"
 #include "radeon_trace.h"
 
@@ -46,13 +47,25 @@
  * function are calling it.
  */
 
-void radeon_bo_clear_va(struct radeon_bo *bo)
+static void radeon_update_memory_usage(struct radeon_bo *bo,
+				       unsigned mem_type, int sign)
 {
-	struct radeon_bo_va *bo_va, *tmp;
+	struct radeon_device *rdev = bo->rdev;
+	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
 
-	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
-		/* remove from all vm address space */
-		radeon_vm_bo_rmv(bo->rdev, bo_va);
+	switch (mem_type) {
+	case TTM_PL_TT:
+		if (sign > 0)
+			atomic64_add(size, &rdev->gtt_usage);
+		else
+			atomic64_sub(size, &rdev->gtt_usage);
+		break;
+	case TTM_PL_VRAM:
+		if (sign > 0)
+			atomic64_add(size, &rdev->vram_usage);
+		else
+			atomic64_sub(size, &rdev->vram_usage);
+		break;
 	}
 }
 
@@ -61,11 +74,14 @@
 	struct radeon_bo *bo;
 
 	bo = container_of(tbo, struct radeon_bo, tbo);
+
+	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+
 	mutex_lock(&bo->rdev->gem.mutex);
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
-	radeon_bo_clear_va(bo);
+	WARN_ON(!list_empty(&bo->va));
 	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
@@ -79,38 +95,91 @@
 
 void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 {
-	u32 c = 0;
+	u32 c = 0, i;
 
-	rbo->placement.fpfn = 0;
-	rbo->placement.lpfn = 0;
 	rbo->placement.placement = rbo->placements;
 	rbo->placement.busy_placement = rbo->placements;
-	if (domain & RADEON_GEM_DOMAIN_VRAM)
-		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-					TTM_PL_FLAG_VRAM;
+	if (domain & RADEON_GEM_DOMAIN_VRAM) {
+		/* Try placing BOs which don't need CPU access outside of the
+		 * CPU accessible part of VRAM
+		 */
+		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
+			rbo->placements[c].fpfn =
+				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+						     TTM_PL_FLAG_UNCACHED |
+						     TTM_PL_FLAG_VRAM;
+		}
+
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+					     TTM_PL_FLAG_UNCACHED |
+					     TTM_PL_FLAG_VRAM;
+	}
+
 	if (domain & RADEON_GEM_DOMAIN_GTT) {
-		if (rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+		if (rbo->flags & RADEON_GEM_GTT_UC) {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_TT;
+
+		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+			   (rbo->rdev->flags & RADEON_IS_AGP)) {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_TT;
 		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+				TTM_PL_FLAG_TT;
 		}
 	}
+
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
-		if (rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+		if (rbo->flags & RADEON_GEM_GTT_UC) {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_SYSTEM;
+
+		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+		    rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_SYSTEM;
 		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+			rbo->placements[c].fpfn = 0;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+				TTM_PL_FLAG_SYSTEM;
 		}
 	}
-	if (!c)
-		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c) {
+		rbo->placements[c].fpfn = 0;
+		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
+					     TTM_PL_FLAG_SYSTEM;
+	}
+
 	rbo->placement.num_placement = c;
 	rbo->placement.num_busy_placement = c;
+
+	for (i = 0; i < c; ++i) {
+		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
+		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    !rbo->placements[i].fpfn)
+			rbo->placements[i].lpfn =
+				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			rbo->placements[i].lpfn = 0;
+	}
 }
 
 int radeon_bo_create(struct radeon_device *rdev,
-		     unsigned long size, int byte_align, bool kernel, u32 domain,
-		     struct sg_table *sg, struct radeon_bo **bo_ptr)
+		     unsigned long size, int byte_align, bool kernel,
+		     u32 domain, u32 flags, struct sg_table *sg,
+		     struct reservation_object *resv,
+		     struct radeon_bo **bo_ptr)
 {
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
@@ -120,7 +189,6 @@
 
 	size = ALIGN(size, PAGE_SIZE);
 
-	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
 	if (kernel) {
 		type = ttm_bo_type_kernel;
 	} else if (sg) {
@@ -142,16 +210,56 @@
 		return r;
 	}
 	bo->rdev = rdev;
-	bo->gem_base.driver_private = NULL;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
 	INIT_LIST_HEAD(&bo->va);
+	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
+				       RADEON_GEM_DOMAIN_GTT |
+				       RADEON_GEM_DOMAIN_CPU);
+
+	bo->flags = flags;
+	/* PCI GART is always snooped */
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
+	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+	 */
+	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
+#ifdef CONFIG_X86_32
+	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+	 */
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+	/* Don't try to enable write-combining when it can't work, or things
+	 * may be slow
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+	 */
+
+#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+	 thanks to write-combining
+
+	if (bo->flags & RADEON_GEM_GTT_WC)
+		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+			      "better performance thanks to write-combining\n");
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
+	if (!drm_arch_can_wc_memory())
+		bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
-			acc_size, sg, &radeon_ttm_bo_destroy);
+			acc_size, sg, resv, &radeon_ttm_bo_destroy);
 	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
 		return r;
@@ -195,6 +303,15 @@
 	ttm_bo_kunmap(&bo->kmap);
 }
 
+struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
+{
+	if (bo == NULL)
+		return NULL;
+
+	ttm_bo_reference(&bo->tbo);
+	return bo;
+}
+
 void radeon_bo_unref(struct radeon_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
@@ -204,9 +321,7 @@
 		return;
 	rdev = (*bo)->rdev;
 	tbo = &((*bo)->tbo);
-	down_read(&rdev->pm.mclk_lock);
 	ttm_bo_unref(&tbo);
-	up_read(&rdev->pm.mclk_lock);
 	if (tbo == NULL)
 		*bo = NULL;
 }
@@ -216,6 +331,9 @@
 {
 	int r, i;
 
+	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+		return -EPERM;
+
 	if (bo->pin_count) {
 		bo->pin_count++;
 		if (gpu_addr)
@@ -235,29 +353,31 @@
 		return 0;
 	}
 	radeon_ttm_placement_from_domain(bo, domain);
-	if (domain == RADEON_GEM_DOMAIN_VRAM) {
+	for (i = 0; i < bo->placement.num_placement; i++) {
 		/* force to pin into visible video ram */
-		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
-	}
-	if (max_offset) {
-		u64 lpfn = max_offset >> PAGE_SHIFT;
-
-		if (!bo->placement.lpfn)
-			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
+		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
+			bo->placements[i].lpfn =
+				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
 
-		if (lpfn < bo->placement.lpfn)
-			bo->placement.lpfn = lpfn;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
-	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
 			*gpu_addr = radeon_bo_gpu_offset(bo);
-	}
-	if (unlikely(r != 0))
+		if (domain == RADEON_GEM_DOMAIN_VRAM)
+			bo->rdev->vram_pin_size += radeon_bo_size(bo);
+		else
+			bo->rdev->gart_pin_size += radeon_bo_size(bo);
+	} else {
 		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+	}
 	return r;
 }
 
@@ -277,11 +397,19 @@
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
-	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		bo->placements[i].lpfn = 0;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-	if (unlikely(r != 0))
+	if (likely(r == 0)) {
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
+		else
+			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
+	} else {
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+	}
 	return r;
 }
 
@@ -305,7 +433,6 @@
 	}
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
-		mutex_lock(&rdev->ddev->struct_mutex);
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -313,8 +440,7 @@
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&rdev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
@@ -322,8 +448,8 @@
 {
 	/* Add an MTRR for the VRAM */
 	if (!rdev->fastfb_working) {
-		rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
-					      MTRR_TYPE_WRCOMB, 1);
+		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
+						      rdev->mc.aper_size);
 	}
 	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
 		rdev->mc.mc_vram_size >> 20,
@@ -336,58 +462,130 @@
 void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
+	arch_phys_wc_del(rdev->mc.vram_mtrr);
 }
 
-void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
-				struct list_head *head)
+/* Returns how many bytes TTM can move per IB.
+ */
+static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
 {
-	if (lobj->written) {
-		list_add(&lobj->tv.head, head);
-	} else {
-		list_add_tail(&lobj->tv.head, head);
-	}
-}
+	u64 real_vram_size = rdev->mc.real_vram_size;
+	u64 vram_usage = atomic64_read(&rdev->vram_usage);
 
-int radeon_bo_list_validate(struct list_head *head, int ring)
+	/* This function is based on the current VRAM usage.
+	 *
+	 * - If all of VRAM is free, allow relocating the number of bytes that
+	 *   is equal to 1/4 of the size of VRAM for this IB.

+	 * - If more than one half of VRAM is occupied, only allow relocating
+	 *   1 MB of data for this IB.
+	 *
+	 * - From 0 to one half of used VRAM, the threshold decreases
+	 *   linearly.
+	 *           __________________
+	 * 1/4 of   -|\               |
+	 * VRAM      | \              |
+	 *           |  \             |
+	 *           |   \            |
+	 *           |    \           |
+	 *           |     \          |
+	 *           |      \         |
+	 *           |       \________|1 MB
+	 *           |----------------|
+	 *      VRAM 0 %            100 %
+	 *           used           used
+	 *
+	 * Note: It's a threshold, not a limit. The threshold must be crossed
+	 * for buffer relocations to stop, so any buffer of an arbitrary size
+	 * can be moved as long as the threshold isn't crossed before
+	 * the relocation takes place. We don't want to disable buffer
+	 * relocations completely.
+	 *
+	 * The idea is that buffers should be placed in VRAM at creation time
+	 * and TTM should only do a minimum number of relocations during
+	 * command submission. In practice, you need to submit at least
+	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
+	 *
+	 * Also, things can get pretty crazy under memory pressure and actual
+	 * VRAM usage can change a lot, so playing safe even at 50% does
+	 * consistently increase performance.
+	 */
+
+	u64 half_vram = real_vram_size >> 1;
+	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
+	u64 bytes_moved_threshold = half_free_vram >> 1;
+	return max(bytes_moved_threshold, 1024*1024ull);
+}
+
+int radeon_bo_list_validate(struct radeon_device *rdev,
+			    struct ww_acquire_ctx *ticket,
+			    struct list_head *head, int ring)
 {
 	struct radeon_bo_list *lobj;
-	struct radeon_bo *bo;
-	u32 domain;
+	struct list_head duplicates;
 	int r;
+	u64 bytes_moved = 0, initial_bytes_moved;
+	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
-	r = ttm_eu_reserve_buffers(head);
+	INIT_LIST_HEAD(&duplicates);
+	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
 	if (unlikely(r != 0)) {
 		return r;
 	}
+
 	list_for_each_entry(lobj, head, tv.head) {
-		bo = lobj->bo;
+		struct radeon_bo *bo = lobj->robj;
 		if (!bo->pin_count) {
-			domain = lobj->domain;
-
+			u32 domain = lobj->prefered_domains;
+			u32 allowed = lobj->allowed_domains;
+			u32 current_domain =
+				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+			/* Check if this buffer will be moved and don't move it
+			 * if we have moved too many buffers for this IB already.
+			 *
+			 * Note that this allows moving at least one buffer of
+			 * any size, because it doesn't take the current "bo"
+			 * into account. We don't want to disallow buffer moves
+			 * completely.
+			 */
+			if ((allowed & current_domain) != 0 &&
+			    (domain & current_domain) == 0 && /* will be moved */
+			    bytes_moved > bytes_moved_threshold) {
+				/* don't move it */
+				domain = current_domain;
+			}
+
 		retry:
 			radeon_ttm_placement_from_domain(bo, domain);
 			if (ring == R600_RING_TYPE_UVD_INDEX)
-				radeon_uvd_force_into_uvd_segment(bo);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement,
-						true, false);
+				radeon_uvd_force_into_uvd_segment(bo, allowed);
+
+			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
+				       initial_bytes_moved;
+
 			if (unlikely(r)) {
-				if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
-					domain = lobj->alt_domain;
+				if (r != -ERESTARTSYS &&
+				    domain != lobj->allowed_domains) {
+					domain = lobj->allowed_domains;
 					goto retry;
 				}
+				ttm_eu_backoff_reservation(ticket, head);
 				return r;
 			}
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
 	}
-	return 0;
-}
 
-int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
-			 struct vm_area_struct *vma)
-{
-	return ttm_fbdev_mmap(vma, &bo->tbo);
+	list_for_each_entry(lobj, &duplicates, tv.head) {
+		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
+		lobj->tiling_flags = lobj->robj->tiling_flags;
+	}
+
+	return 0;
 }
 
 int radeon_bo_get_surface_reg(struct radeon_bo *bo)
@@ -398,7 +596,7 @@
 	int steal;
 	int i;
 
-	BUG_ON(!radeon_bo_is_reserved(bo));
+	lockdep_assert_held(&bo->tbo.resv->lock.base);
 
 	if (!bo->tiling_flags)
 		return 0;
@@ -524,7 +722,8 @@
 			uint32_t *tiling_flags,
 			uint32_t *pitch)
 {
-	BUG_ON(!radeon_bo_is_reserved(bo));
+	lockdep_assert_held(&bo->tbo.resv->lock.base);
+
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
 	if (pitch)
@@ -534,7 +733,8 @@
 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop)
 {
-	BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
+	if (!force_drop)
+		lockdep_assert_held(&bo->tbo.resv->lock.base);
 
 	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
@@ -560,22 +760,31 @@
 }
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem)
+			   struct ttm_mem_reg *new_mem)
 {
 	struct radeon_bo *rbo;
+
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
 		return;
+
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 1);
 	radeon_vm_bo_invalidate(rbo->rdev, rbo);
+
+	/* update statistics */
+	if (!new_mem)
+		return;
+
+	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
-	unsigned long offset, size;
-	int r;
+	unsigned long offset, size, lpfn;
+	int i, r;
 
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
 		return 0;
@@ -592,7 +801,13 @@
 
 	/* hurrah the memory is not visible ! */
 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	for (i = 0; i < rbo->placement.num_placement; i++) {
+		/* Force into visible VRAM */
+		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
+			rbo->placements[i].lpfn = lpfn;
+	}
 	r = ttm_bo_validate(bo, &rbo->placement, false, false);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -613,38 +828,32 @@
 {
 	int r;
 
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
 	if (unlikely(r != 0))
 		return r;
-	spin_lock(&bo->tbo.bdev->fence_lock);
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
-	if (bo->tbo.sync_obj)
-		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-	spin_unlock(&bo->tbo.bdev->fence_lock);
+
+	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
 
-
 /**
- * radeon_bo_reserve - reserve bo
- * @bo: bo structure
- * @no_intr: don't return -ERESTARTSYS on pending signal
+ * radeon_bo_fence - add fence to buffer object
+ *
+ * @bo: buffer object in question
+ * @fence: fence to add
+ * @shared: true if fence should be added shared
  *
- * Returns:
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
 */
-int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
+void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
+		     bool shared)
 {
-	int r;
+	struct reservation_object *resv = bo->tbo.resv;
 
-	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
-	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS)
-			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
-		return r;
-	}
-	return 0;
+	if (shared)
+		reservation_object_add_shared_fence(resv, &fence->base);
+	else
+		reservation_object_add_excl_fence(resv, &fence->base);
}
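
For reference, the throttling heuristic introduced above in radeon_bo_get_threshold_for_moves() reduces to "half of the free half of VRAM, floored at 1 MB". The standalone C sketch below reproduces the curve drawn in the patch comment so it can be checked numerically outside the kernel; threshold_for_moves() and the 1 GiB sample board are illustrative stand-ins, not kernel interfaces.

/* Userspace sketch of the bytes-moved threshold computed by
 * radeon_bo_get_threshold_for_moves() in the patch above.
 * Illustrative only; build with: cc -std=c99 -o threshold threshold.c
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t threshold_for_moves(uint64_t real_vram_size, uint64_t vram_usage)
{
	uint64_t half_vram = real_vram_size >> 1;
	/* free space below the 50%-used mark; 0 once half of VRAM is occupied */
	uint64_t half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	uint64_t threshold = half_free_vram >> 1;
	/* 1 MB floor: relocations are throttled, never disabled outright */
	return threshold > 1024 * 1024 ? threshold : 1024 * 1024;
}

int main(void)
{
	const uint64_t vram = 1024ULL << 20;	/* hypothetical 1 GiB board */

	/* Expected: 0% used -> 1/4 of VRAM, decreasing linearly,
	 * then 1 MiB from 50% used onward, matching the plot. */
	for (int pct = 0; pct <= 100; pct += 25) {
		uint64_t used = vram * pct / 100;
		printf("%3d%% used -> threshold %6llu KiB\n", pct,
		       (unsigned long long)(threshold_for_moves(vram, used) >> 10));
	}
	return 0;
}

Run against the 1 GiB sample, this prints 262144 KiB (256 MiB, i.e. 1/4 of VRAM) at 0% usage, 131072 KiB at 25%, and 1024 KiB from 50% upward, which is exactly the piecewise-linear curve the patch comment describes.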