--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/radeon/radeon_gem.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/radeon/radeon_gem.c	2021-02-04 17:41:59.000000000 +0000
@@ -29,13 +29,6 @@
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
@@ -43,13 +36,14 @@
 	if (robj) {
 		if (robj->gem_base.import_attach)
 			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+		radeon_mn_unregister(robj);
 		radeon_bo_unref(&robj);
 	}
 }
 
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
 			     int alignment, int initial_domain,
-			     bool discardable, bool kernel,
+			     u32 flags, bool kernel,
 			     struct drm_gem_object **obj)
 {
 	struct radeon_bo *robj;
@@ -62,23 +56,26 @@
 		alignment = PAGE_SIZE;
 	}
 
-	/* maximun bo size is the minimun btw visible vram and gtt size */
-	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	/* Maximum bo size is the unpinned gtt size since we use the gtt to
+	 * handle vram to system pool migrations.
+	 */
+	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
 	if (size > max_size) {
-		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
-		       __func__, __LINE__, size >> 20, max_size >> 20);
+		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
+			  size >> 20, max_size >> 20);
 		return -ENOMEM;
 	}
 
 retry:
-	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+			     flags, NULL, NULL, &robj);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
 				initial_domain |= RADEON_GEM_DOMAIN_GTT;
 				goto retry;
 			}
-			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
 		}
 		return r;
@@ -93,12 +90,12 @@
 	return 0;
 }
 
-int radeon_gem_set_domain(struct drm_gem_object *gobj,
+static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 			  uint32_t rdomain, uint32_t wdomain)
 {
 	struct radeon_bo *robj;
 	uint32_t domain;
-	int r;
+	long r;
 
 	/* FIXME: reeimplement */
 	robj = gem_to_radeon_bo(gobj);
@@ -114,9 +111,12 @@
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = radeon_bo_wait(robj, NULL, false);
-		if (r) {
-			printk(KERN_ERR "Failed to wait for object !\n");
+		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+		if (!r)
+			r = -EBUSY;
+
+		if (r < 0 && r != -EINTR) {
+			printk(KERN_ERR "Failed to wait for object: %li\n", r);
 			return r;
 		}
 	}
@@ -147,7 +147,8 @@
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	if (rdev->family < CHIP_CAYMAN) {
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
 		return 0;
 	}
 
@@ -177,7 +178,8 @@
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	if (rdev->family < CHIP_CAYMAN) {
+	if ((rdev->family < CHIP_CAYMAN) ||
+	    (!rdev->accel_working)) {
 		return;
 	}
 
@@ -215,18 +217,15 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
 	struct ttm_mem_type_manager *man;
-	unsigned i;
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
 	args->vram_size = rdev->mc.real_vram_size;
 	args->vram_visible = (u64)man->size << PAGE_SHIFT;
-	if (rdev->stollen_vga_memory)
-		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
-	args->vram_visible -= radeon_fbdev_total_size(rdev);
-	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
-	for(i = 0; i < RADEON_NUM_RINGS; ++i)
-		args->gart_size -= rdev->ring[i].ring_size;
+	args->vram_visible -= rdev->vram_pin_size;
+	args->gart_size = rdev->mc.gtt_size;
+	args->gart_size -= rdev->gart_pin_size;
+
 	return 0;
 }
 
@@ -259,8 +258,8 @@
 	/* create a gem object to contain this object in */
 	args->size = roundup(args->size, PAGE_SIZE);
 	r = radeon_gem_object_create(rdev, args->size, args->alignment,
-				     args->initial_domain, false,
-				     false, &gobj);
+				     args->initial_domain, args->flags,
+				     false, &gobj);
 	if (r) {
 		up_read(&rdev->exclusive_lock);
 		r = radeon_gem_handle_lockup(rdev, r);
@@ -279,6 +278,94 @@
 	return 0;
 }
 
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_userptr *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *bo;
+	uint32_t handle;
+	int r;
+
+	if (offset_in_page(args->addr | args->size))
+		return -EINVAL;
+
+	/* reject unknown flag values */
+	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
+	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
+	    RADEON_GEM_USERPTR_REGISTER))
+		return -EINVAL;
+
+	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
+		/* readonly pages not tested on older hardware */
+		if (rdev->family < CHIP_R600)
+			return -EINVAL;
+
+	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
+		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
+
+		/* if we want to write to it we must require anonymous
+		   memory and install a MMU notifier */
+		return -EACCES;
+	}
+
+	down_read(&rdev->exclusive_lock);
+
+	/* create a gem object to contain this object in */
+	r = radeon_gem_object_create(rdev, args->size, 0,
+				     RADEON_GEM_DOMAIN_CPU, 0,
+				     false, &gobj);
+	if (r)
+		goto handle_lockup;
+
+	bo = gem_to_radeon_bo(gobj);
+	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+	if (r)
+		goto release_object;
+
+	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
+		r = radeon_mn_register(bo, args->addr);
+		if (r)
+			goto release_object;
+	}
+
+	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
+		down_read(&current->mm->mmap_sem);
+		r = radeon_bo_reserve(bo, true);
+		if (r) {
+			up_read(&current->mm->mmap_sem);
+			goto release_object;
+		}
+
+		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		radeon_bo_unreserve(bo);
+		up_read(&current->mm->mmap_sem);
+		if (r)
+			goto release_object;
+	}
+
+	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r)
+		goto handle_lockup;
+
+	args->handle = handle;
+	up_read(&rdev->exclusive_lock);
+	return 0;
+
+release_object:
+	drm_gem_object_unreference_unlocked(gobj);
+
+handle_lockup:
+	up_read(&rdev->exclusive_lock);
+	r = radeon_gem_handle_lockup(rdev, r);
+
+	return r;
+}
+
 int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp)
 {
@@ -322,6 +409,10 @@
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
+	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -EPERM;
+	}
 	*offset_p = radeon_bo_mmap_offset(robj);
 	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
@@ -338,7 +429,6 @@
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
-	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -350,21 +440,16 @@
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
-	r = radeon_bo_wait(robj, &cur_placement, true);
-	switch (cur_placement) {
-	case TTM_PL_VRAM:
-		args->domain = RADEON_GEM_DOMAIN_VRAM;
-		break;
-	case TTM_PL_TT:
-		args->domain = RADEON_GEM_DOMAIN_GTT;
-		break;
-	case TTM_PL_SYSTEM:
-		args->domain = RADEON_GEM_DOMAIN_CPU;
-	default:
-		break;
-	}
+
+	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+	if (r == 0)
+		r = -EBUSY;
+	else
+		r = 0;
+
+	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+	args->domain = radeon_mem_type_to_domain(cur_placement);
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
 
@@ -375,17 +460,27 @@
 	struct drm_radeon_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
-	int r;
+	int r = 0;
+	uint32_t cur_placement = 0;
+	long ret;
 
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
-	r = radeon_bo_wait(robj, NULL, false);
-	/* callback hw specific functions if any */
-	if (rdev->asic->ioctl_wait_idle)
-		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+
+	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+	if (ret == 0)
+		r = -EBUSY;
+	else if (ret < 0)
+		r = ret;
+
+	/* Flush HDP cache via MMIO if necessary */
+	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+	if (rdev->asic->mmio_hdp_flush &&
+	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+		robj->rdev->asic->mmio_hdp_flush(rdev);
 	drm_gem_object_unreference_unlocked(gobj);
 	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
@@ -432,6 +527,68 @@
 	return r;
 }
 
+/**
+ * radeon_gem_va_update_vm -update the bo_va in its VM
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to update
+ *
+ * Update the bo_va directly after setting it's address. Errors are not
+ * vital here, so they are not reported back to userspace.
+ */
+static void radeon_gem_va_update_vm(struct radeon_device *rdev,
+				    struct radeon_bo_va *bo_va)
+{
+	struct ttm_validate_buffer tv, *entry;
+	struct radeon_bo_list *vm_bos;
+	struct ww_acquire_ctx ticket;
+	struct list_head list;
+	unsigned domain;
+	int r;
+
+	INIT_LIST_HEAD(&list);
+
+	tv.bo = &bo_va->bo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
+	if (!vm_bos)
+		return;
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	if (r)
+		goto error_free;
+
+	list_for_each_entry(entry, &list, head) {
+		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+		/* if anything is swapped out don't swap it in here,
+		   just abort and wait for the next CS */
+		if (domain == RADEON_GEM_DOMAIN_CPU)
+			goto error_unreserve;
+	}
+
+	mutex_lock(&bo_va->vm->mutex);
+	r = radeon_vm_clear_freed(rdev, bo_va->vm);
+	if (r)
+		goto error_unlock;
+
+	if (bo_va->it.start)
+		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+
+error_unlock:
+	mutex_unlock(&bo_va->vm->mutex);
+
+error_unreserve:
+	ttm_eu_backoff_reservation(&ticket, &list);
+
+error_free:
+	drm_free_large(vm_bos);
+
+	if (r && r != -ERESTARTSYS)
+		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+}
+
 int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp)
 {
@@ -479,11 +636,6 @@
 		args->operation = RADEON_VA_RESULT_ERROR;
 		return -EINVAL;
 	}
-	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
-		dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
-		args->operation = RADEON_VA_RESULT_ERROR;
-		return -EINVAL;
-	}
 
 	switch (args->operation) {
 	case RADEON_VA_MAP:
@@ -517,9 +669,10 @@
 
 	switch (args->operation) {
 	case RADEON_VA_MAP:
-		if (bo_va->soffset) {
+		if (bo_va->it.start) {
 			args->operation = RADEON_VA_RESULT_VA_EXIST;
-			args->offset = bo_va->soffset;
+			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
+			radeon_bo_unreserve(rbo);
 			goto out;
 		}
 		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -530,12 +683,54 @@
 	default:
 		break;
 	}
+	if (!r)
+		radeon_gem_va_update_vm(rdev, bo_va);
 	args->operation = RADEON_VA_RESULT_OK;
 	if (r) {
 		args->operation = RADEON_VA_RESULT_ERROR;
 	}
 out:
-	radeon_bo_unreserve(rbo);
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
+int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp)
+{
+	struct drm_radeon_gem_op *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+
+	r = -EPERM;
+	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
+		goto out;
+
+	r = radeon_bo_reserve(robj, false);
+	if (unlikely(r))
+		goto out;
+
+	switch (args->op) {
+	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
+		args->value = robj->initial_domain;
+		break;
+	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
+		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
+						      RADEON_GEM_DOMAIN_GTT |
+						      RADEON_GEM_DOMAIN_CPU);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	radeon_bo_unreserve(robj);
+out:
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
@@ -554,9 +749,8 @@
 	args->size = ALIGN(args->size, PAGE_SIZE);
 
 	r = radeon_gem_object_create(rdev, args->size, 0,
-				     RADEON_GEM_DOMAIN_VRAM,
-				     false, ttm_bo_type_device,
-				     &gobj);
+				     RADEON_GEM_DOMAIN_VRAM, 0,
+				     false, &gobj);
 	if (r)
 		return -ENOMEM;
 
@@ -570,13 +764,6 @@
 	return 0;
 }
 
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
-			     struct drm_device *dev,
-			     uint32_t handle)
-{
-	return drm_gem_handle_delete(file_priv, handle);
-}
-
 #if defined(CONFIG_DEBUG_FS)
 static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
 {