--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2021-02-04 17:41:59.000000000 +0000 @@ -32,36 +32,44 @@ #include "i915_trace.h" #include "intel_drv.h" #include +#include -struct eb_objects { - struct list_head objects; +#define __EXEC_OBJECT_HAS_PIN (1<<31) +#define __EXEC_OBJECT_HAS_FENCE (1<<30) +#define __EXEC_OBJECT_NEEDS_MAP (1<<29) +#define __EXEC_OBJECT_NEEDS_BIAS (1<<28) + +#define BATCH_OFFSET_BIAS (256*1024) + +struct eb_vmas { + struct list_head vmas; int and; union { - struct drm_i915_gem_object *lut[0]; + struct i915_vma *lut[0]; struct hlist_head buckets[0]; }; }; -static struct eb_objects * +static struct eb_vmas * eb_create(struct drm_i915_gem_execbuffer2 *args) { - struct eb_objects *eb = NULL; + struct eb_vmas *eb = NULL; if (args->flags & I915_EXEC_HANDLE_LUT) { - int size = args->buffer_count; - size *= sizeof(struct drm_i915_gem_object *); - size += sizeof(struct eb_objects); + unsigned size = args->buffer_count; + size *= sizeof(struct i915_vma *); + size += sizeof(struct eb_vmas); eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); } if (eb == NULL) { - int size = args->buffer_count; - int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; + unsigned size = args->buffer_count; + unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2; BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); while (count > 2*size) count >>= 1; eb = kzalloc(count*sizeof(struct hlist_head) + - sizeof(struct eb_objects), + sizeof(struct eb_vmas), GFP_TEMPORARY); if (eb == NULL) return eb; @@ -70,64 +78,114 @@ } else eb->and = -args->buffer_count; - INIT_LIST_HEAD(&eb->objects); + INIT_LIST_HEAD(&eb->vmas); return eb; } static void -eb_reset(struct eb_objects *eb) +eb_reset(struct eb_vmas *eb) { if (eb->and >= 0) memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); } static int -eb_lookup_objects(struct eb_objects *eb, - struct drm_i915_gem_exec_object2 *exec, - const struct drm_i915_gem_execbuffer2 *args, - struct drm_file *file) +eb_lookup_vmas(struct eb_vmas *eb, + struct drm_i915_gem_exec_object2 *exec, + const struct drm_i915_gem_execbuffer2 *args, + struct i915_address_space *vm, + struct drm_file *file) { - int i; + struct drm_i915_gem_object *obj; + struct list_head objects; + int i, ret; + INIT_LIST_HEAD(&objects); spin_lock(&file->table_lock); + /* Grab a reference to the object and release the lock so we can lookup + * or create the VMA without using GFP_ATOMIC */ for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj; - obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); if (obj == NULL) { spin_unlock(&file->table_lock); DRM_DEBUG("Invalid object handle %d at index %d\n", exec[i].handle, i); - return -ENOENT; + ret = -ENOENT; + goto err; } - if (!list_empty(&obj->exec_list)) { + if (!list_empty(&obj->obj_exec_link)) { spin_unlock(&file->table_lock); DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", obj, exec[i].handle, i); - return -EINVAL; + ret = -EINVAL; + goto err; } drm_gem_object_reference(&obj->base); - list_add_tail(&obj->exec_list, &eb->objects); + list_add_tail(&obj->obj_exec_link, &objects); + } + spin_unlock(&file->table_lock); + + i = 0; + while (!list_empty(&objects)) { + struct i915_vma *vma; + + obj = list_first_entry(&objects, + struct drm_i915_gem_object, + obj_exec_link); - obj->exec_entry = &exec[i]; + /* + * 
NOTE: We can leak any vmas created here when something fails + * later on. But that's no issue since vma_unbind can deal with + * vmas which are not actually bound. And since only + * lookup_or_create exists as an interface to get at the vma + * from the (obj, vm) we don't run the risk of creating + * duplicated vmas for the same vm. + */ + vma = i915_gem_obj_lookup_or_create_vma(obj, vm); + if (IS_ERR(vma)) { + DRM_DEBUG("Failed to lookup VMA\n"); + ret = PTR_ERR(vma); + goto err; + } + + /* Transfer ownership from the objects list to the vmas list. */ + list_add_tail(&vma->exec_list, &eb->vmas); + list_del_init(&obj->obj_exec_link); + + vma->exec_entry = &exec[i]; if (eb->and < 0) { - eb->lut[i] = obj; + eb->lut[i] = vma; } else { uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle; - obj->exec_handle = handle; - hlist_add_head(&obj->exec_node, + vma->exec_handle = handle; + hlist_add_head(&vma->exec_node, &eb->buckets[handle & eb->and]); } + ++i; } - spin_unlock(&file->table_lock); return 0; + + +err: + while (!list_empty(&objects)) { + obj = list_first_entry(&objects, + struct drm_i915_gem_object, + obj_exec_link); + list_del_init(&obj->obj_exec_link); + drm_gem_object_unreference(&obj->base); + } + /* + * Objects already transfered to the vmas list will be unreferenced by + * eb_destroy. + */ + + return ret; } -static struct drm_i915_gem_object * -eb_get_object(struct eb_objects *eb, unsigned long handle) +static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) { if (eb->and < 0) { if (handle >= -eb->and) @@ -139,65 +197,215 @@ head = &eb->buckets[handle & eb->and]; hlist_for_each(node, head) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; - obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); - if (obj->exec_handle == handle) - return obj; + vma = hlist_entry(node, struct i915_vma, exec_node); + if (vma->exec_handle == handle) + return vma; } return NULL; } } static void -eb_destroy(struct eb_objects *eb) +i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) { - while (!list_empty(&eb->objects)) { - struct drm_i915_gem_object *obj; + struct drm_i915_gem_exec_object2 *entry; + struct drm_i915_gem_object *obj = vma->obj; - obj = list_first_entry(&eb->objects, - struct drm_i915_gem_object, + if (!drm_mm_node_allocated(&vma->node)) + return; + + entry = vma->exec_entry; + + if (entry->flags & __EXEC_OBJECT_HAS_FENCE) + i915_gem_object_unpin_fence(obj); + + if (entry->flags & __EXEC_OBJECT_HAS_PIN) + vma->pin_count--; + + entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); +} + +static void eb_destroy(struct eb_vmas *eb) +{ + while (!list_empty(&eb->vmas)) { + struct i915_vma *vma; + + vma = list_first_entry(&eb->vmas, + struct i915_vma, exec_list); - list_del_init(&obj->exec_list); - drm_gem_object_unreference(&obj->base); + list_del_init(&vma->exec_list); + i915_gem_execbuffer_unreserve_vma(vma); + drm_gem_object_unreference(&vma->obj->base); } kfree(eb); } static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) { - return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || - !obj->map_and_fenceable || + return (HAS_LLC(obj->base.dev) || + obj->base.write_domain == I915_GEM_DOMAIN_CPU || obj->cache_level != I915_CACHE_NONE); } static int +relocate_entry_cpu(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc, + uint64_t target_offset) +{ + struct drm_device *dev = obj->base.dev; + uint32_t page_offset = offset_in_page(reloc->offset); + uint64_t delta = reloc->delta + 
target_offset; + char *vaddr; + int ret; + + ret = i915_gem_object_set_to_cpu_domain(obj, true); + if (ret) + return ret; + + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + reloc->offset >> PAGE_SHIFT)); + *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); + + if (INTEL_INFO(dev)->gen >= 8) { + page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + + if (page_offset == 0) { + kunmap_atomic(vaddr); + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); + } + + *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta); + } + + kunmap_atomic(vaddr); + + return 0; +} + +static int +relocate_entry_gtt(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc, + uint64_t target_offset) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint64_t delta = reloc->delta + target_offset; + uint64_t offset; + void __iomem *reloc_page; + int ret; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + + ret = i915_gem_object_put_fence(obj); + if (ret) + return ret; + + /* Map the page containing the relocation we're going to perform. */ + offset = i915_gem_obj_ggtt_offset(obj); + offset += reloc->offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + offset & PAGE_MASK); + iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset)); + + if (INTEL_INFO(dev)->gen >= 8) { + offset += sizeof(uint32_t); + + if (offset_in_page(offset) == 0) { + io_mapping_unmap_atomic(reloc_page); + reloc_page = + io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + offset); + } + + iowrite32(upper_32_bits(delta), + reloc_page + offset_in_page(offset)); + } + + io_mapping_unmap_atomic(reloc_page); + + return 0; +} + +static void +clflush_write32(void *addr, uint32_t value) +{ + /* This is not a fast path, so KISS. 
*/ + drm_clflush_virt_range(addr, sizeof(uint32_t)); + *(uint32_t *)addr = value; + drm_clflush_virt_range(addr, sizeof(uint32_t)); +} + +static int +relocate_entry_clflush(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc, + uint64_t target_offset) +{ + struct drm_device *dev = obj->base.dev; + uint32_t page_offset = offset_in_page(reloc->offset); + uint64_t delta = (int)reloc->delta + target_offset; + char *vaddr; + int ret; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + reloc->offset >> PAGE_SHIFT)); + clflush_write32(vaddr + page_offset, lower_32_bits(delta)); + + if (INTEL_INFO(dev)->gen >= 8) { + page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + + if (page_offset == 0) { + kunmap_atomic(vaddr); + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); + } + + clflush_write32(vaddr + page_offset, upper_32_bits(delta)); + } + + kunmap_atomic(vaddr); + + return 0; +} + +static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, - struct eb_objects *eb, + struct eb_vmas *eb, struct drm_i915_gem_relocation_entry *reloc) { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; struct drm_i915_gem_object *target_i915_obj; - uint32_t target_offset; - int ret = -EINVAL; + struct i915_vma *target_vma; + uint64_t target_offset; + int ret; /* we've already hold a reference to all valid objects */ - target_obj = &eb_get_object(eb, reloc->target_handle)->base; - if (unlikely(target_obj == NULL)) + target_vma = eb_get_vma(eb, reloc->target_handle); + if (unlikely(target_vma == NULL)) return -ENOENT; + target_i915_obj = target_vma->obj; + target_obj = &target_vma->obj->base; - target_i915_obj = to_intel_bo(target_obj); - target_offset = target_i915_obj->gtt_offset; + target_offset = target_vma->node.start; /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and * pipe_control writes because the gpu doesn't properly redirect them * through the ppgtt for non_secure batchbuffers. */ if (unlikely(IS_GEN6(dev) && - reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - !target_i915_obj->has_global_gtt_mapping)) { - i915_gem_gtt_bind_object(target_i915_obj, - target_i915_obj->cache_level); + reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) { + ret = i915_vma_bind(target_vma, target_i915_obj->cache_level, + PIN_GLOBAL); + if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!")) + return ret; } /* Validate that the target is in a valid r/w GPU domain */ @@ -209,7 +417,7 @@ (int) reloc->offset, reloc->read_domains, reloc->write_domain); - return ret; + return -EINVAL; } if (unlikely((reloc->write_domain | reloc->read_domains) & ~I915_GEM_GPU_DOMAINS)) { @@ -220,7 +428,7 @@ (int) reloc->offset, reloc->read_domains, reloc->write_domain); - return ret; + return -EINVAL; } target_obj->pending_read_domains |= reloc->read_domains; @@ -233,62 +441,41 @@ return 0; /* Check that the relocation address is valid... */ - if (unlikely(reloc->offset > obj->base.size - 4)) { + if (unlikely(reloc->offset > + obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 
8 : 4))) { DRM_DEBUG("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", obj, reloc->target_handle, (int) reloc->offset, (int) obj->base.size); - return ret; + return -EINVAL; } if (unlikely(reloc->offset & 3)) { DRM_DEBUG("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", obj, reloc->target_handle, (int) reloc->offset); - return ret; + return -EINVAL; } /* We can't wait for rendering with pagefaults disabled */ - if (obj->active && in_atomic()) + if (obj->active && pagefault_disabled()) return -EFAULT; - reloc->delta += target_offset; - if (use_cpu_reloc(obj)) { - uint32_t page_offset = reloc->offset & ~PAGE_MASK; - char *vaddr; - - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) - return ret; - - vaddr = kmap_atomic(i915_gem_object_get_page(obj, - reloc->offset >> PAGE_SHIFT)); - *(uint32_t *)(vaddr + page_offset) = reloc->delta; - kunmap_atomic(vaddr); - } else { - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t __iomem *reloc_entry; - void __iomem *reloc_page; - - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; - - ret = i915_gem_object_put_fence(obj); - if (ret) - return ret; - - /* Map the page containing the relocation we're going to perform. */ - reloc->offset += obj->gtt_offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - reloc->offset & PAGE_MASK); - reloc_entry = (uint32_t __iomem *) - (reloc_page + (reloc->offset & ~PAGE_MASK)); - iowrite32(reloc->delta, reloc_entry); - io_mapping_unmap_atomic(reloc_page); + if (use_cpu_reloc(obj)) + ret = relocate_entry_cpu(obj, reloc, target_offset); + else if (obj->map_and_fenceable) + ret = relocate_entry_gtt(obj, reloc, target_offset); + else if (cpu_has_clflush) + ret = relocate_entry_clflush(obj, reloc, target_offset); + else { + WARN_ONCE(1, "Impossible case in relocation handling\n"); + ret = -ENODEV; } + if (ret) + return ret; + /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; @@ -296,13 +483,13 @@ } static int -i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, - struct eb_objects *eb) +i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, + struct eb_vmas *eb) { #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; struct drm_i915_gem_relocation_entry __user *user_relocs; - struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; int remain, ret; user_relocs = to_user_ptr(entry->relocs_ptr); @@ -321,7 +508,7 @@ do { u64 offset = r->presumed_offset; - ret = i915_gem_execbuffer_relocate_entry(obj, eb, r); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); if (ret) return ret; @@ -342,15 +529,15 @@ } static int -i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, - struct eb_objects *eb, - struct drm_i915_gem_relocation_entry *relocs) +i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, + struct eb_vmas *eb, + struct drm_i915_gem_relocation_entry *relocs) { - const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; int i, ret; for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]); if (ret) return ret; } @@ -359,9 +546,9 @@ } static int -i915_gem_execbuffer_relocate(struct eb_objects 
*eb) +i915_gem_execbuffer_relocate(struct eb_vmas *eb) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; int ret = 0; /* This is the fast path and we cannot handle a pagefault whilst @@ -372,8 +559,8 @@ * lockdep complains vehemently. */ pagefault_disable(); - list_for_each_entry(obj, &eb->objects, exec_list) { - ret = i915_gem_execbuffer_relocate_object(obj, eb); + list_for_each_entry(vma, &eb->vmas, exec_list) { + ret = i915_gem_execbuffer_relocate_vma(vma, eb); if (ret) break; } @@ -382,62 +569,62 @@ return ret; } -#define __EXEC_OBJECT_HAS_PIN (1<<31) -#define __EXEC_OBJECT_HAS_FENCE (1<<30) - -static int -need_reloc_mappable(struct drm_i915_gem_object *obj) +static bool only_mappable_for_reloc(unsigned int flags) { - struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; - return entry->relocation_count && !use_cpu_reloc(obj); + return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) == + __EXEC_OBJECT_NEEDS_MAP; } static int -i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring, - bool *need_reloc) -{ - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; - bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; - bool need_fence, need_mappable; +i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, + struct intel_engine_cs *ring, + bool *need_reloc) +{ + struct drm_i915_gem_object *obj = vma->obj; + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; + uint64_t flags; int ret; - need_fence = - has_fenced_gpu_access && - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(obj); - - ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); + flags = PIN_USER; + if (entry->flags & EXEC_OBJECT_NEEDS_GTT) + flags |= PIN_GLOBAL; + + if (!drm_mm_node_allocated(&vma->node)) { + /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, + * limit address to the first 4GBs for unflagged objects. 
+ */ + if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0) + flags |= PIN_ZONE_4G; + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) + flags |= PIN_GLOBAL | PIN_MAPPABLE; + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) + flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + if ((flags & PIN_MAPPABLE) == 0) + flags |= PIN_HIGH; + } + + ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); + if ((ret == -ENOSPC || ret == -E2BIG) && + only_mappable_for_reloc(entry->flags)) + ret = i915_gem_object_pin(obj, vma->vm, + entry->alignment, + flags & ~PIN_MAPPABLE); if (ret) return ret; entry->flags |= __EXEC_OBJECT_HAS_PIN; - if (has_fenced_gpu_access) { - if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { - ret = i915_gem_object_get_fence(obj); - if (ret) - return ret; - - if (i915_gem_object_pin_fence(obj)) - entry->flags |= __EXEC_OBJECT_HAS_FENCE; - - obj->pending_fenced_gpu_access = true; - } - } - - /* Ensure ppgtt mapping exists if needed */ - if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { - i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, - obj, obj->cache_level); + if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { + ret = i915_gem_object_get_fence(obj); + if (ret) + return ret; - obj->has_aliasing_ppgtt_mapping = 1; + if (i915_gem_object_pin_fence(obj)) + entry->flags |= __EXEC_OBJECT_HAS_FENCE; } - if (entry->offset != obj->gtt_offset) { - entry->offset = obj->gtt_offset; + if (entry->offset != vma->node.start) { + entry->offset = vma->node.start; *need_reloc = true; } @@ -446,68 +633,104 @@ obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; } - if (entry->flags & EXEC_OBJECT_NEEDS_GTT && - !obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(obj, obj->cache_level); - return 0; } -static void -i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) +static bool +need_reloc_mappable(struct i915_vma *vma) { - struct drm_i915_gem_exec_object2 *entry; + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - if (!obj->gtt_space) - return; + if (entry->relocation_count == 0) + return false; - entry = obj->exec_entry; + if (!i915_is_ggtt(vma->vm)) + return false; - if (entry->flags & __EXEC_OBJECT_HAS_FENCE) - i915_gem_object_unpin_fence(obj); + /* See also use_cpu_reloc() */ + if (HAS_LLC(vma->obj->base.dev)) + return false; - if (entry->flags & __EXEC_OBJECT_HAS_PIN) - i915_gem_object_unpin(obj); + if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU) + return false; - entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); + return true; +} + +static bool +eb_vma_misplaced(struct i915_vma *vma) +{ + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; + struct drm_i915_gem_object *obj = vma->obj; + + WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && + !i915_is_ggtt(vma->vm)); + + if (entry->alignment && + vma->node.start & (entry->alignment - 1)) + return true; + + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && + vma->node.start < BATCH_OFFSET_BIAS) + return true; + + /* avoid costly ping-pong once a batch bo ended up non-mappable */ + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) + return !only_mappable_for_reloc(entry->flags); + + if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && + (vma->node.start + vma->node.size - 1) >> 32) + return true; + + return false; } static int -i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, - struct list_head *objects, +i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, + struct list_head *vmas, + struct intel_context *ctx, bool 
*need_relocs) { struct drm_i915_gem_object *obj; - struct list_head ordered_objects; + struct i915_vma *vma; + struct i915_address_space *vm; + struct list_head ordered_vmas; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; int retry; - INIT_LIST_HEAD(&ordered_objects); - while (!list_empty(objects)) { + i915_gem_retire_requests_ring(ring); + + vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; + + INIT_LIST_HEAD(&ordered_vmas); + while (!list_empty(vmas)) { struct drm_i915_gem_exec_object2 *entry; bool need_fence, need_mappable; - obj = list_first_entry(objects, - struct drm_i915_gem_object, - exec_list); - entry = obj->exec_entry; + vma = list_first_entry(vmas, struct i915_vma, exec_list); + obj = vma->obj; + entry = vma->exec_entry; + + if (ctx->flags & CONTEXT_NO_ZEROMAP) + entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; + if (!has_fenced_gpu_access) + entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; need_fence = - has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(obj); + need_mappable = need_fence || need_reloc_mappable(vma); - if (need_mappable) - list_move(&obj->exec_list, &ordered_objects); - else - list_move_tail(&obj->exec_list, &ordered_objects); + if (need_mappable) { + entry->flags |= __EXEC_OBJECT_NEEDS_MAP; + list_move(&vma->exec_list, &ordered_vmas); + } else + list_move_tail(&vma->exec_list, &ordered_vmas); obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; obj->base.pending_write_domain = 0; - obj->pending_fenced_gpu_access = false; } - list_splice(&ordered_objects, objects); + list_splice(&ordered_vmas, vmas); /* Attempt to pin all of the buffers into the GTT. * This is done in 3 phases: @@ -526,46 +749,37 @@ int ret = 0; /* Unbind any ill-fitting objects or pin. 
*/ - list_for_each_entry(obj, objects, exec_list) { - struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; - bool need_fence, need_mappable; - - if (!obj->gtt_space) + list_for_each_entry(vma, vmas, exec_list) { + if (!drm_mm_node_allocated(&vma->node)) continue; - need_fence = - has_fenced_gpu_access && - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(obj); - - if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || - (need_mappable && !obj->map_and_fenceable)) - ret = i915_gem_object_unbind(obj); + if (eb_vma_misplaced(vma)) + ret = i915_vma_unbind(vma); else - ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); + ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); if (ret) goto err; } /* Bind fresh objects */ - list_for_each_entry(obj, objects, exec_list) { - if (obj->gtt_space) + list_for_each_entry(vma, vmas, exec_list) { + if (drm_mm_node_allocated(&vma->node)) continue; - ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); + ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); if (ret) goto err; } -err: /* Decrement pin count for bound objects */ - list_for_each_entry(obj, objects, exec_list) - i915_gem_execbuffer_unreserve_object(obj); - +err: if (ret != -ENOSPC || retry++) return ret; - ret = i915_gem_evict_everything(ring->dev); + /* Decrement pin count for bound objects */ + list_for_each_entry(vma, vmas, exec_list) + i915_gem_execbuffer_unreserve_vma(vma); + + ret = i915_gem_evict_vm(vm, true); if (ret) return ret; } while (1); @@ -575,24 +789,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_i915_gem_execbuffer2 *args, struct drm_file *file, - struct intel_ring_buffer *ring, - struct eb_objects *eb, - struct drm_i915_gem_exec_object2 *exec) + struct intel_engine_cs *ring, + struct eb_vmas *eb, + struct drm_i915_gem_exec_object2 *exec, + struct intel_context *ctx) { struct drm_i915_gem_relocation_entry *reloc; - struct drm_i915_gem_object *obj; + struct i915_address_space *vm; + struct i915_vma *vma; bool need_relocs; int *reloc_offset; int i, total, ret; - int count = args->buffer_count; + unsigned count = args->buffer_count; + + vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm; /* We may process another execbuffer during the unlock... 
*/ - while (!list_empty(&eb->objects)) { - obj = list_first_entry(&eb->objects, - struct drm_i915_gem_object, - exec_list); - list_del_init(&obj->exec_list); - drm_gem_object_unreference(&obj->base); + while (!list_empty(&eb->vmas)) { + vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); + list_del_init(&vma->exec_list); + i915_gem_execbuffer_unreserve_vma(vma); + drm_gem_object_unreference(&vma->obj->base); } mutex_unlock(&dev->struct_mutex); @@ -656,19 +873,19 @@ /* reacquire the objects */ eb_reset(eb); - ret = eb_lookup_objects(eb, exec, args, file); + ret = eb_lookup_vmas(eb, exec, args, vm, file); if (ret) goto err; need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); + ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); if (ret) goto err; - list_for_each_entry(obj, &eb->objects, exec_list) { - int offset = obj->exec_entry - exec; - ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, - reloc + reloc_offset[offset]); + list_for_each_entry(vma, &eb->vmas, exec_list) { + int offset = vma->exec_entry - exec; + ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb, + reloc + reloc_offset[offset]); if (ret) goto err; } @@ -686,26 +903,32 @@ } static int -i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, - struct list_head *objects) +i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, + struct list_head *vmas) { - struct drm_i915_gem_object *obj; + const unsigned other_rings = ~intel_ring_flag(req->ring); + struct i915_vma *vma; uint32_t flush_domains = 0; + bool flush_chipset = false; int ret; - list_for_each_entry(obj, objects, exec_list) { - ret = i915_gem_object_sync(obj, ring); - if (ret) - return ret; + list_for_each_entry(vma, vmas, exec_list) { + struct drm_i915_gem_object *obj = vma->obj; + + if (obj->active & other_rings) { + ret = i915_gem_object_sync(obj, req->ring, &req); + if (ret) + return ret; + } if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) - i915_gem_clflush_object(obj); + flush_chipset |= i915_gem_clflush_object(obj, false); flush_domains |= obj->base.write_domain; } - if (flush_domains & I915_GEM_DOMAIN_CPU) - i915_gem_chipset_flush(ring->dev); + if (flush_chipset) + i915_gem_chipset_flush(req->ring->dev); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); @@ -713,7 +936,7 @@ /* Unconditionally invalidate gpu caches and ensure that we do flush * any residual writes from the previous batch. 
*/ - return intel_ring_invalidate_all_caches(ring); + return intel_ring_invalidate_all_caches(req); } static bool @@ -722,22 +945,45 @@ if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) return false; - return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; + /* Kernel clipping was a DRI1 misfeature */ + if (exec->num_cliprects || exec->cliprects_ptr) + return false; + + if (exec->DR4 == 0xffffffff) { + DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); + exec->DR4 = 0; + } + if (exec->DR1 || exec->DR4) + return false; + + if ((exec->batch_start_offset | exec->batch_len) & 0x7) + return false; + + return true; } static int -validate_exec_list(struct drm_i915_gem_exec_object2 *exec, +validate_exec_list(struct drm_device *dev, + struct drm_i915_gem_exec_object2 *exec, int count) { + unsigned relocs_total = 0; + unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry); + unsigned invalid_flags; int i; - int relocs_total = 0; - int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); + + invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; + if (USES_FULL_PPGTT(dev)) + invalid_flags |= EXEC_OBJECT_NEEDS_GTT; for (i = 0; i < count; i++) { char __user *ptr = to_user_ptr(exec[i].relocs_ptr); int length; /* limited by fault_in_pages_readable() */ - if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) + if (exec[i].flags & invalid_flags) + return -EINVAL; + + if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) return -EINVAL; /* First check for malicious input causing overflow in @@ -758,64 +1004,111 @@ if (!access_ok(VERIFY_WRITE, ptr, length)) return -EFAULT; - if (fault_in_multipages_readable(ptr, length)) - return -EFAULT; + if (likely(!i915.prefault_disable)) { + if (fault_in_multipages_readable(ptr, length)) + return -EFAULT; + } } return 0; } -static void -i915_gem_execbuffer_move_to_active(struct list_head *objects, - struct intel_ring_buffer *ring) +static struct intel_context * +i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, + struct intel_engine_cs *ring, const u32 ctx_id) { - struct drm_i915_gem_object *obj; + struct intel_context *ctx = NULL; + struct i915_ctx_hang_stats *hs; + + if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) + return ERR_PTR(-EINVAL); + + ctx = i915_gem_context_get(file->driver_priv, ctx_id); + if (IS_ERR(ctx)) + return ctx; + + hs = &ctx->hang_stats; + if (hs->banned) { + DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id); + return ERR_PTR(-EIO); + } + + if (i915.enable_execlists && !ctx->engine[ring->id].state) { + int ret = intel_lr_context_deferred_alloc(ctx, ring); + if (ret) { + DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); + return ERR_PTR(ret); + } + } + + return ctx; +} - list_for_each_entry(obj, objects, exec_list) { +void +i915_gem_execbuffer_move_to_active(struct list_head *vmas, + struct drm_i915_gem_request *req) +{ + struct intel_engine_cs *ring = i915_gem_request_get_ring(req); + struct i915_vma *vma; + + list_for_each_entry(vma, vmas, exec_list) { + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; + struct drm_i915_gem_object *obj = vma->obj; u32 old_read = obj->base.read_domains; u32 old_write = obj->base.write_domain; + obj->dirty = 1; /* be paranoid */ obj->base.write_domain = obj->base.pending_write_domain; if (obj->base.write_domain == 0) obj->base.pending_read_domains |= obj->base.read_domains; obj->base.read_domains = obj->base.pending_read_domains; - obj->fenced_gpu_access = obj->pending_fenced_gpu_access; - 
i915_gem_object_move_to_active(obj, ring); + i915_vma_move_to_active(vma, req); if (obj->base.write_domain) { - obj->dirty = 1; - obj->last_write_seqno = intel_ring_get_seqno(ring); - if (obj->pin_count) /* check for potential scanout */ - intel_mark_fb_busy(obj); + i915_gem_request_assign(&obj->last_write_req, req); + + intel_fb_obj_invalidate(obj, ORIGIN_CS); + + /* update for the implicit flush after a batch */ + obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; + } + if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { + i915_gem_request_assign(&obj->last_fenced_req, req); + if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { + struct drm_i915_private *dev_priv = to_i915(ring->dev); + list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, + &dev_priv->mm.fence_list); + } } trace_i915_gem_object_change_domain(obj, old_read, old_write); } } -static void -i915_gem_execbuffer_retire_commands(struct drm_device *dev, - struct drm_file *file, - struct intel_ring_buffer *ring) +void +i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) { /* Unconditionally force add_request to emit a full flush. */ - ring->gpu_caches_dirty = true; + params->ring->gpu_caches_dirty = true; /* Add a breadcrumb for the completion of the batch buffer */ - (void)i915_add_request(ring, file, NULL); + __i915_add_request(params->request, params->batch_obj, true); } static int i915_reset_gen7_sol_offsets(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct drm_i915_gem_request *req) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_engine_cs *ring = req->ring; + struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; - if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) - return 0; + if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) { + DRM_DEBUG("sol reset is gen7/rcs only\n"); + return -EINVAL; + } - ret = intel_ring_begin(ring, 4 * 3); + ret = intel_ring_begin(req, 4 * 3); if (ret) return ret; @@ -830,94 +1123,285 @@ return 0; } +static struct drm_i915_gem_object* +i915_gem_execbuffer_parse(struct intel_engine_cs *ring, + struct drm_i915_gem_exec_object2 *shadow_exec_entry, + struct eb_vmas *eb, + struct drm_i915_gem_object *batch_obj, + u32 batch_start_offset, + u32 batch_len, + bool is_master) +{ + struct drm_i915_gem_object *shadow_batch_obj; + struct i915_vma *vma; + int ret; + + shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool, + PAGE_ALIGN(batch_len)); + if (IS_ERR(shadow_batch_obj)) + return shadow_batch_obj; + + ret = i915_parse_cmds(ring, + batch_obj, + shadow_batch_obj, + batch_start_offset, + batch_len, + is_master); + if (ret) + goto err; + + ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0); + if (ret) + goto err; + + i915_gem_object_unpin_pages(shadow_batch_obj); + + memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); + + vma = i915_gem_obj_to_ggtt(shadow_batch_obj); + vma->exec_entry = shadow_exec_entry; + vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN; + drm_gem_object_reference(&shadow_batch_obj->base); + list_add_tail(&vma->exec_list, &eb->vmas); + + shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND; + + return shadow_batch_obj; + +err: + i915_gem_object_unpin_pages(shadow_batch_obj); + if (ret == -EACCES) /* unhandled chained batch */ + return batch_obj; + else + return ERR_PTR(ret); +} + +int +i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas) +{ + struct drm_device *dev = params->dev; + struct intel_engine_cs *ring 
= params->ring; + struct drm_i915_private *dev_priv = dev->dev_private; + u64 exec_start, exec_len; + int instp_mode; + u32 instp_mask; + int ret; + + ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); + if (ret) + return ret; + + ret = i915_switch_context(params->request); + if (ret) + return ret; + + WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<id), + "%s didn't clear reload\n", ring->name); + + instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; + instp_mask = I915_EXEC_CONSTANTS_MASK; + switch (instp_mode) { + case I915_EXEC_CONSTANTS_REL_GENERAL: + case I915_EXEC_CONSTANTS_ABSOLUTE: + case I915_EXEC_CONSTANTS_REL_SURFACE: + if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { + DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); + return -EINVAL; + } + + if (instp_mode != dev_priv->relative_constants_mode) { + if (INTEL_INFO(dev)->gen < 4) { + DRM_DEBUG("no rel constants on pre-gen4\n"); + return -EINVAL; + } + + if (INTEL_INFO(dev)->gen > 5 && + instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { + DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); + return -EINVAL; + } + + /* The HW changed the meaning on this bit on gen6 */ + if (INTEL_INFO(dev)->gen >= 6) + instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; + } + break; + default: + DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); + return -EINVAL; + } + + if (ring == &dev_priv->ring[RCS] && + instp_mode != dev_priv->relative_constants_mode) { + ret = intel_ring_begin(params->request, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, INSTPM); + intel_ring_emit(ring, instp_mask << 16 | instp_mode); + intel_ring_advance(ring); + + dev_priv->relative_constants_mode = instp_mode; + } + + if (args->flags & I915_EXEC_GEN7_SOL_RESET) { + ret = i915_reset_gen7_sol_offsets(dev, params->request); + if (ret) + return ret; + } + + exec_len = args->batch_len; + exec_start = params->batch_obj_vm_offset + + params->args_batch_start_offset; + + ret = ring->dispatch_execbuffer(params->request, + exec_start, exec_len, + params->dispatch_flags); + if (ret) + return ret; + + trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); + + i915_gem_execbuffer_move_to_active(vmas, params->request); + i915_gem_execbuffer_retire_commands(params); + + return 0; +} + +/** + * Find one BSD ring to dispatch the corresponding BSD command. + * The Ring ID is returned. + */ +static int gen8_dispatch_bsd_ring(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; + + /* Check whether the file_priv is using one ring */ + if (file_priv->bsd_ring) + return file_priv->bsd_ring->id; + else { + /* If no, use the ping-pong mechanism to select one ring */ + int ring_id; + + mutex_lock(&dev->struct_mutex); + if (dev_priv->mm.bsd_ring_dispatch_index == 0) { + ring_id = VCS; + dev_priv->mm.bsd_ring_dispatch_index = 1; + } else { + ring_id = VCS2; + dev_priv->mm.bsd_ring_dispatch_index = 0; + } + file_priv->bsd_ring = &dev_priv->ring[ring_id]; + mutex_unlock(&dev->struct_mutex); + return ring_id; + } +} + +static struct drm_i915_gem_object * +eb_get_batch(struct eb_vmas *eb) +{ + struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); + + /* + * SNA is doing fancy tricks with compressing batch buffers, which leads + * to negative relocation deltas. 
Usually that works out ok since the + * relocate address is still positive, except when the batch is placed + * very low in the GTT. Ensure this doesn't happen. + * + * Note that actual hangs have only been observed on gen7, but for + * paranoia do it everywhere. + */ + vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; + + return vma->obj; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_exec_object2 *exec) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct eb_objects *eb; + struct drm_i915_private *dev_priv = dev->dev_private; + struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; - struct drm_clip_rect *cliprects = NULL; - struct intel_ring_buffer *ring; - u32 ctx_id = i915_execbuffer2_get_context_id(*args); - u32 exec_start, exec_len; - u32 mask, flags; - int ret, mode, i; + struct drm_i915_gem_exec_object2 shadow_exec_entry; + struct intel_engine_cs *ring; + struct intel_context *ctx; + struct i915_address_space *vm; + struct i915_execbuffer_params params_master; /* XXX: will be removed later */ + struct i915_execbuffer_params *params = ¶ms_master; + const u32 ctx_id = i915_execbuffer2_get_context_id(*args); + u32 dispatch_flags; + int ret; bool need_relocs; if (!i915_gem_check_execbuffer(args)) return -EINVAL; - ret = validate_exec_list(exec, args->buffer_count); + ret = validate_exec_list(dev, exec, args->buffer_count); if (ret) return ret; - flags = 0; + dispatch_flags = 0; if (args->flags & I915_EXEC_SECURE) { if (!file->is_master || !capable(CAP_SYS_ADMIN)) return -EPERM; - flags |= I915_DISPATCH_SECURE; + dispatch_flags |= I915_DISPATCH_SECURE; } if (args->flags & I915_EXEC_IS_PINNED) - flags |= I915_DISPATCH_PINNED; + dispatch_flags |= I915_DISPATCH_PINNED; - switch (args->flags & I915_EXEC_RING_MASK) { - case I915_EXEC_DEFAULT: - case I915_EXEC_RENDER: - ring = &dev_priv->ring[RCS]; - break; - case I915_EXEC_BSD: - ring = &dev_priv->ring[VCS]; - if (ctx_id != 0) { - DRM_DEBUG("Ring %s doesn't support contexts\n", - ring->name); - return -EPERM; - } - break; - case I915_EXEC_BLT: - ring = &dev_priv->ring[BCS]; - if (ctx_id != 0) { - DRM_DEBUG("Ring %s doesn't support contexts\n", - ring->name); - return -EPERM; - } - break; - default: + if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) { DRM_DEBUG("execbuf with unknown ring: %d\n", (int)(args->flags & I915_EXEC_RING_MASK)); return -EINVAL; } - if (!intel_ring_initialized(ring)) { - DRM_DEBUG("execbuf with invalid ring: %d\n", - (int)(args->flags & I915_EXEC_RING_MASK)); - return -EINVAL; - } - mode = args->flags & I915_EXEC_CONSTANTS_MASK; - mask = I915_EXEC_CONSTANTS_MASK; - switch (mode) { - case I915_EXEC_CONSTANTS_REL_GENERAL: - case I915_EXEC_CONSTANTS_ABSOLUTE: - case I915_EXEC_CONSTANTS_REL_SURFACE: - if (ring == &dev_priv->ring[RCS] && - mode != dev_priv->relative_constants_mode) { - if (INTEL_INFO(dev)->gen < 4) - return -EINVAL; + if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) && + ((args->flags & I915_EXEC_BSD_MASK) != 0)) { + DRM_DEBUG("execbuf with non bsd ring but with invalid " + "bsd dispatch flags: %d\n", (int)(args->flags)); + return -EINVAL; + } - if (INTEL_INFO(dev)->gen > 5 && - mode == I915_EXEC_CONSTANTS_REL_SURFACE) + if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT) + ring = &dev_priv->ring[RCS]; + else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) { + if (HAS_BSD2(dev)) { + int ring_id; + + switch (args->flags & I915_EXEC_BSD_MASK) { + 
case I915_EXEC_BSD_DEFAULT: + ring_id = gen8_dispatch_bsd_ring(dev, file); + ring = &dev_priv->ring[ring_id]; + break; + case I915_EXEC_BSD_RING1: + ring = &dev_priv->ring[VCS]; + break; + case I915_EXEC_BSD_RING2: + ring = &dev_priv->ring[VCS2]; + break; + default: + DRM_DEBUG("execbuf with unknown bsd ring: %d\n", + (int)(args->flags & I915_EXEC_BSD_MASK)); return -EINVAL; + } + } else + ring = &dev_priv->ring[VCS]; + } else + ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1]; - /* The HW changed the meaning on this bit on gen6 */ - if (INTEL_INFO(dev)->gen >= 6) - mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; - } - break; - default: - DRM_DEBUG("execbuf with unknown constants: %d\n", mode); + if (!intel_ring_initialized(ring)) { + DRM_DEBUG("execbuf with invalid ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); return -EINVAL; } @@ -926,68 +1410,61 @@ return -EINVAL; } - if (args->num_cliprects != 0) { - if (ring != &dev_priv->ring[RCS]) { - DRM_DEBUG("clip rectangles are only valid with the render ring\n"); + if (args->flags & I915_EXEC_RESOURCE_STREAMER) { + if (!HAS_RESOURCE_STREAMER(dev)) { + DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n"); return -EINVAL; } - - if (INTEL_INFO(dev)->gen >= 5) { - DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); + if (ring->id != RCS) { + DRM_DEBUG("RS is not available on %s\n", + ring->name); return -EINVAL; } - if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { - DRM_DEBUG("execbuf with %u cliprects\n", - args->num_cliprects); - return -EINVAL; - } - - cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), - GFP_KERNEL); - if (cliprects == NULL) { - ret = -ENOMEM; - goto pre_mutex_err; - } - - if (copy_from_user(cliprects, - to_user_ptr(args->cliprects_ptr), - sizeof(*cliprects)*args->num_cliprects)) { - ret = -EFAULT; - goto pre_mutex_err; - } + dispatch_flags |= I915_DISPATCH_RS; } + intel_runtime_pm_get(dev_priv); + ret = i915_mutex_lock_interruptible(dev); if (ret) goto pre_mutex_err; - if (dev_priv->mm.suspended) { + ctx = i915_gem_validate_context(dev, file, ring, ctx_id); + if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); - ret = -EBUSY; + ret = PTR_ERR(ctx); goto pre_mutex_err; } + i915_gem_context_reference(ctx); + + if (ctx->ppgtt) + vm = &ctx->ppgtt->base; + else + vm = &dev_priv->gtt.base; + + memset(¶ms_master, 0x00, sizeof(params_master)); + eb = eb_create(args); if (eb == NULL) { + i915_gem_context_unreference(ctx); mutex_unlock(&dev->struct_mutex); ret = -ENOMEM; goto pre_mutex_err; } /* Look up object handles */ - ret = eb_lookup_objects(eb, exec, args, file); + ret = eb_lookup_vmas(eb, exec, args, vm, file); if (ret) goto err; /* take note of the batch buffer before we might reorder the lists */ - batch_obj = list_entry(eb->objects.prev, - struct drm_i915_gem_object, - exec_list); + batch_obj = eb_get_batch(eb); /* Move the objects en-masse into the GTT, evicting if necessary. 
*/ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); + ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); if (ret) goto err; @@ -997,7 +1474,7 @@ if (ret) { if (ret == -EFAULT) { ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, - eb, exec); + eb, exec, ctx); BUG_ON(!mutex_is_locked(&dev->struct_mutex)); } if (ret) @@ -1010,79 +1487,121 @@ ret = -EINVAL; goto err; } - batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - - /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure - * batch" bit. Hence we need to pin secure batches into the global gtt. - * hsw should have this fixed, but let's be paranoid and do it - * unconditionally for now. */ - if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) - i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); - - ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); - if (ret) - goto err; - - ret = i915_switch_context(ring, file, ctx_id); - if (ret) - goto err; - if (ring == &dev_priv->ring[RCS] && - mode != dev_priv->relative_constants_mode) { - ret = intel_ring_begin(ring, 4); - if (ret) - goto err; + params->args_batch_start_offset = args->batch_start_offset; + if (i915_needs_cmd_parser(ring) && args->batch_len) { + struct drm_i915_gem_object *parsed_batch_obj; + + parsed_batch_obj = i915_gem_execbuffer_parse(ring, + &shadow_exec_entry, + eb, + batch_obj, + args->batch_start_offset, + args->batch_len, + file->is_master); + if (IS_ERR(parsed_batch_obj)) { + ret = PTR_ERR(parsed_batch_obj); + goto err; + } - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, INSTPM); - intel_ring_emit(ring, mask << 16 | mode); - intel_ring_advance(ring); + /* + * parsed_batch_obj == batch_obj means batch not fully parsed: + * Accept, but don't promote to secure. + */ - dev_priv->relative_constants_mode = mode; + if (parsed_batch_obj != batch_obj) { + /* + * Batch parsed and accepted: + * + * Set the DISPATCH_SECURE bit to remove the NON_SECURE + * bit from MI_BATCH_BUFFER_START commands issued in + * the dispatch_execbuffer implementations. We + * specifically don't want that set on batches the + * command parser has accepted. + */ + dispatch_flags |= I915_DISPATCH_SECURE; + params->args_batch_start_offset = 0; + batch_obj = parsed_batch_obj; + } } - if (args->flags & I915_EXEC_GEN7_SOL_RESET) { - ret = i915_reset_gen7_sol_offsets(dev, ring); + batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; + + /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure + * batch" bit. Hence we need to pin secure batches into the global gtt. + * hsw should have this fixed, but bdw mucks it up again. */ + if (dispatch_flags & I915_DISPATCH_SECURE) { + /* + * So on first glance it looks freaky that we pin the batch here + * outside of the reservation loop. But: + * - The batch is already pinned into the relevant ppgtt, so we + * already have the backing storage fully allocated. + * - No other BO uses the global gtt (well contexts, but meh), + * so we don't really have issues with multiple objects not + * fitting due to fragmentation. + * So this is actually safe. 
+ */ + ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0); if (ret) goto err; - } - exec_start = batch_obj->gtt_offset + args->batch_start_offset; - exec_len = args->batch_len; - if (cliprects) { - for (i = 0; i < args->num_cliprects; i++) { - ret = i915_emit_box(dev, &cliprects[i], - args->DR1, args->DR4); - if (ret) - goto err; + params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj); + } else + params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm); - ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); - if (ret) - goto err; - } - } else { - ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); - if (ret) - goto err; - } + /* Allocate a request for this batch buffer nice and early. */ + ret = i915_gem_request_alloc(ring, ctx, ¶ms->request); + if (ret) + goto err_batch_unpin; - trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); + ret = i915_gem_request_add_to_client(params->request, file); + if (ret) + goto err_batch_unpin; - i915_gem_execbuffer_move_to_active(&eb->objects, ring); - i915_gem_execbuffer_retire_commands(dev, file, ring); + /* + * Save assorted stuff away to pass through to *_submission(). + * NB: This data should be 'persistent' and not local as it will + * kept around beyond the duration of the IOCTL once the GPU + * scheduler arrives. + */ + params->dev = dev; + params->file = file; + params->ring = ring; + params->dispatch_flags = dispatch_flags; + params->batch_obj = batch_obj; + params->ctx = ctx; + + ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); + +err_batch_unpin: + /* + * FIXME: We crucially rely upon the active tracking for the (ppgtt) + * batch vma for correctness. For less ugly and less fragility this + * needs to be adjusted to also track the ggtt batch vma properly as + * active. + */ + if (dispatch_flags & I915_DISPATCH_SECURE) + i915_gem_object_ggtt_unpin(batch_obj); err: + /* the request owns the ref now */ + i915_gem_context_unreference(ctx); eb_destroy(eb); + /* + * If the request was created but not successfully submitted then it + * must be freed again. If it was submitted then it is being tracked + * on the active request list and no clean up is required here. + */ + if (ret && params->request) + i915_gem_request_cancel(params->request); + mutex_unlock(&dev->struct_mutex); pre_mutex_err: - kfree(cliprects); + /* intel_gpu_busy should also get a ref, so it will free when the device + * is really idle. */ + intel_runtime_pm_put(dev_priv); return ret; } @@ -1188,6 +1707,11 @@ return -EINVAL; } + if (args->rsvd2 != 0) { + DRM_DEBUG("dirty rvsd2 field\n"); + return -EINVAL; + } + exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); if (exec2_list == NULL) @@ -1211,7 +1735,7 @@ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ - struct drm_i915_gem_exec_object2 *user_exec_list = + struct drm_i915_gem_exec_object2 __user *user_exec_list = to_user_ptr(args->buffers_ptr); int i;
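
The handle-to-vma lookup introduced above (eb_create()/eb_get_vma()) sizes its hash table to at most half a page of bucket heads and keeps the count a power of two, so "handle & eb->and" can serve directly as the bucket index. A minimal user-space sketch of that sizing rule follows; bucket_head, pick_bucket_count and the fixed PAGE_SIZE are illustrative stand-ins, not the kernel's types:

#include <stdio.h>

#define PAGE_SIZE 4096u

struct bucket_head { void *first; };	/* stand-in for struct hlist_head */

static unsigned pick_bucket_count(unsigned buffer_count)
{
	/* Start with half a page of bucket heads, halve while still more
	 * than twice the number of entries; the count stays a power of two. */
	unsigned count = PAGE_SIZE / sizeof(struct bucket_head) / 2;

	while (count > 2 * buffer_count)
		count >>= 1;
	return count;
}

int main(void)
{
	unsigned count = pick_bucket_count(16);
	unsigned handle = 42;

	/* (count - 1) plays the role of eb->and as the hash mask. */
	printf("%u buckets, handle %u -> bucket %u\n",
	       count, handle, handle & (count - 1));
	return 0;
}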
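
On gen8+ the relocation value written by relocate_entry_cpu() is 64 bits wide, so the upper 32 bits may land on the page after the one holding the lower half, and the code re-maps the next page before writing them. A self-contained sketch of that boundary handling, assuming a flat two-page buffer in place of kmap_atomic()/i915_gem_object_get_page() (write_reloc64 and the layout are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define offset_in_page(off) ((unsigned)((off) & (PAGE_SIZE - 1)))

static void write_reloc64(uint8_t *pages, uint64_t reloc_offset, uint64_t delta)
{
	uint8_t *vaddr = pages + (reloc_offset / PAGE_SIZE) * PAGE_SIZE;
	unsigned page_offset = offset_in_page(reloc_offset);
	uint32_t lower = (uint32_t)delta;
	uint32_t upper = (uint32_t)(delta >> 32);

	memcpy(vaddr + page_offset, &lower, sizeof(lower));

	/* The upper half may start on the following page. */
	page_offset = offset_in_page(page_offset + sizeof(uint32_t));
	if (page_offset == 0)
		vaddr = pages + ((reloc_offset + sizeof(uint32_t)) / PAGE_SIZE) * PAGE_SIZE;

	memcpy(vaddr + page_offset, &upper, sizeof(upper));
}

int main(void)
{
	static uint8_t obj[2 * PAGE_SIZE];

	/* Relocation entry occupies the last 4 bytes of the first page,
	 * so the upper half spills into the second page. */
	write_reloc64(obj, PAGE_SIZE - 4, 0x123456789abcdef0ull);
	printf("byte at end of page 0: 0x%02x, byte at start of page 1: 0x%02x\n",
	       (unsigned)obj[PAGE_SIZE - 4], (unsigned)obj[PAGE_SIZE]);
	return 0;
}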
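
clflush_write32() above brackets an ordinary store with cache-line flushes so a non-coherent reader sees the new value in memory. A rough user-space equivalent, assuming x86 SSE2 intrinsics as a stand-in for drm_clflush_virt_range():

#include <emmintrin.h>	/* _mm_clflush()/_mm_mfence(); x86 with SSE2 only */
#include <stdint.h>
#include <stdio.h>

static uint32_t scratch[16];

/* Flush any stale cached line, do the store through the cache, then
 * flush again so the value is pushed out to memory. */
static void clflush_write32(void *addr, uint32_t value)
{
	_mm_clflush(addr);
	*(uint32_t *)addr = value;
	_mm_clflush(addr);
	_mm_mfence();
}

int main(void)
{
	clflush_write32(&scratch[0], 0xdeadbeefu);
	printf("0x%08x\n", scratch[0]);
	return 0;
}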