--- zzzz-none-000/linux-3.10.107/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c	2021-02-04 17:41:59.000000000 +0000
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,24 +29,46 @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_placement.h>
 
+struct vmw_temp_set_context {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDXTempSetContext body;
+};
+
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+	if (!(dev_priv->capabilities & SVGA_CAP_3D))
+		return false;
+
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		uint32_t result;
+
+		if (!dev_priv->has_mob)
+			return false;
+
+		spin_lock(&dev_priv->cap_lock);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
+		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		spin_unlock(&dev_priv->cap_lock);
+
+		return (result != 0);
+	}
+
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;
 
-	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+	fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
 		return false;
 
-	hwversion = ioread32(fifo_mem +
-			     ((fifo->capabilities &
-			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
-			      SVGA_FIFO_3D_HWVERSION_REVISED :
-			      SVGA_FIFO_3D_HWVERSION));
+	hwversion = vmw_mmio_read(fifo_mem +
+				  ((fifo->capabilities &
+				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+				   SVGA_FIFO_3D_HWVERSION_REVISED :
+				   SVGA_FIFO_3D_HWVERSION));
 
 	if (hwversion == 0)
 		return false;
@@ -54,8 +76,8 @@
 	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
 		return false;
 
-	/* Non-Screen Object path does not support surfaces */
-	if (!dev_priv->sou_priv)
+	/* Legacy Display Unit does not support surfaces */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return false;
 
 	return true;
@@ -63,13 +85,13 @@
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t caps;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;
 
-	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
 	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
 		return true;
 
@@ -78,11 +100,11 @@
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
-	uint32_t dummy;
 
+	fifo->dx = false;
 	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
 	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
 	if (unlikely(fifo->static_buffer == NULL))
@@ -95,19 +117,17 @@
 	mutex_init(&fifo->fifo_mutex);
 	init_rwsem(&fifo->rwsem);
 
-	/*
-	 * Allow mapping the first page read-only to user-space.
-	 */
-
 	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
 	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
 	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
 
-	mutex_lock(&dev_priv->hw_mutex);
 	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
 	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
 	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
-	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+		  SVGA_REG_ENABLE_HIDE);
+	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 
 	min = 4;
 	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -117,20 +137,19 @@
 	if (min < PAGE_SIZE)
 		min = PAGE_SIZE;
 
-	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
-	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
+	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
 	wmb();
-	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
-	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
-	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
+	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
+	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
+	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
 	mb();
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
-	mutex_unlock(&dev_priv->hw_mutex);
 
-	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
-	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
 
 	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
 		 (unsigned int) max,
@@ -138,36 +157,31 @@
 		 (unsigned int) fifo->capabilities);
 
 	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
-	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
 	vmw_marker_queue_init(&fifo->marker_queue);
-	return vmw_fifo_send_fence(dev_priv, &dummy);
+
+	return 0;
 }
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 
-	mutex_lock(&dev_priv->hw_mutex);
-
-	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
-		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+	preempt_disable();
+	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
-	}
-
-	mutex_unlock(&dev_priv->hw_mutex);
+	preempt_enable();
 }
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
-	mutex_lock(&dev_priv->hw_mutex);
+	u32 *fifo_mem = dev_priv->mmio_virt;
 
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
 		;
 
-	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
 		  dev_priv->config_done_state);
@@ -176,7 +190,6 @@
 	vmw_write(dev_priv, SVGA_REG_TRACES,
 		  dev_priv->traces_state);
 
-	mutex_unlock(&dev_priv->hw_mutex);
 	vmw_marker_queue_takedown(&fifo->marker_queue);
 
 	if (likely(fifo->static_buffer != NULL)) {
@@ -192,11 +205,11 @@
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
-	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
-	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
 
 	return ((max - next_cmd) + (stop - min) <= bytes);
 }
@@ -239,7 +252,6 @@
 		       unsigned long timeout)
 {
 	long ret = 1L;
-	unsigned long irq_flags;
 
 	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
 		return 0;
@@ -249,16 +261,8 @@
 		return vmw_fifo_wait_noirq(dev_priv, bytes,
 					   interruptible, timeout);
 
-	mutex_lock(&dev_priv->hw_mutex);
-	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
-		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
+			       &dev_priv->fifo_queue_waiters);
 
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
@@ -274,14 +278,8 @@
 	else if (likely(ret > 0))
 		ret = 0;
 
-	mutex_lock(&dev_priv->hw_mutex);
-	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
+				  &dev_priv->fifo_queue_waiters);
 
 	return ret;
 }
@@ -296,10 +294,11 @@
  * Returns:
  *   Pointer to the fifo, or null on error (possible hardware hang).
  */
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+				    uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 	uint32_t next_cmd;
@@ -307,9 +306,9 @@
 	int ret;
 
 	mutex_lock(&fifo_state->fifo_mutex);
-	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
-	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
+	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
 
 	if (unlikely(bytes >= (max - min)))
 		goto out_err;
@@ -320,7 +319,7 @@
 	fifo_state->reserved_size = bytes;
 
 	while (1) {
-		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
 		bool need_bounce = false;
 		bool reserve_in_place = false;
 
@@ -354,9 +353,10 @@
 				fifo_state->using_bounce_buffer = false;
 
 				if (reserveable)
-					iowrite32(bytes, fifo_mem +
-						  SVGA_FIFO_RESERVED);
-				return fifo_mem + (next_cmd >> 2);
+					vmw_mmio_write(bytes, fifo_mem +
+						       SVGA_FIFO_RESERVED);
+				return (void __force *) (fifo_mem +
+							 (next_cmd >> 2));
 			} else {
 				need_bounce = true;
 			}
@@ -375,11 +375,36 @@
 out_err:
 	fifo_state->reserved_size = 0;
 	mutex_unlock(&fifo_state->fifo_mutex);
+
 	return NULL;
 }
 
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+			  int ctx_id)
+{
+	void *ret;
+
+	if (dev_priv->cman)
+		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
+					 ctx_id, false, NULL);
+	else if (ctx_id == SVGA3D_INVALID_ID)
+		ret = vmw_local_fifo_reserve(dev_priv, bytes);
+	else {
+		WARN(1, "Command buffer has not been allocated.\n");
+		ret = NULL;
+	}
+	if (IS_ERR_OR_NULL(ret)) {
+		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
+			  (unsigned) bytes);
+		dump_stack();
+		return NULL;
+	}
+
+	return ret;
+}
+
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-			      __le32 __iomem *fifo_mem,
+			      u32 *fifo_mem,
 			      uint32_t next_cmd,
 			      uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -391,17 +416,16 @@
 	if (bytes < chunk_size)
 		chunk_size = bytes;
 
-	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
 	mb();
-	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
+	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
 	rest = bytes - chunk_size;
 	if (rest)
-		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
-			    rest);
+		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-			       __le32 __iomem *fifo_mem,
+			       u32 *fifo_mem,
 			       uint32_t next_cmd,
 			       uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -409,26 +433,30 @@
 		fifo_state->dynamic_buffer : fifo_state->static_buffer;
 
 	while (bytes > 0) {
-		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
+		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
 		next_cmd += sizeof(uint32_t);
 		if (unlikely(next_cmd == max))
 			next_cmd = min;
 		mb();
-		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
 		mb();
 		bytes -= sizeof(uint32_t);
 	}
}
 
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
-	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
-	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
+	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
+	if (fifo_state->dx)
+		bytes += sizeof(struct vmw_temp_set_context);
+
+	fifo_state->dx = false;
 	BUG_ON((bytes & 3) != 0);
 	BUG_ON(bytes > fifo_state->reserved_size);
 
@@ -455,24 +483,64 @@
 		if (next_cmd >= max)
 			next_cmd -= max - min;
 		mb();
-		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
 	}
 
 	if (reserveable)
-		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
+		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
 	mb();
 	up_write(&fifo_state->rwsem);
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+
+/**
+ * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @bytes: Number of bytes to commit.
+ */
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+/**
+ * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * starts.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @interruptible: Whether to wait interruptible if function needs to sleep.
+ */
+int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+{
+	might_sleep();
+
+	if (dev_priv->cman)
+		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
+	else
+		return 0;
+}
+
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
-	void *fm;
+	u32 *fm;
 	int ret = 0;
-	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
@@ -498,12 +566,10 @@
 		return 0;
 	}
 
-	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
-	cmd_fence = (struct svga_fifo_cmd_fence *)
-		((unsigned long)fm + sizeof(__le32));
-
-	iowrite32(*seqno, &cmd_fence->fence);
-	vmw_fifo_commit(dev_priv, bytes);
+	*fm++ = SVGA_CMD_FENCE;
+	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+	cmd_fence->fence = *seqno;
+	vmw_fifo_commit_flush(dev_priv, bytes);
 	(void) vmw_marker_push(&fifo_state->marker_queue,
 			       *seqno);
 	vmw_update_seqno(dev_priv, fifo_state);
@@ -512,24 +578,16 @@
 }
 
 /**
- * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * legacy query commands.
  *
  * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
- * This function is used to emit a dummy occlusion query with
- * no primitives rendered between query begin and query end.
- * It's used to provide a query barrier, in order to know that when
- * this query is finished, all preceding queries are also finished.
- *
- * A Query results structure should have been initialized at the start
- * of the dev_priv->dummy_query_bo buffer object. And that buffer object
- * must also be either reserved or pinned when this function is called.
- *
- * Returns -ENOMEM on failure to reserve fifo space.
+ * See the vmw_fifo_emit_dummy_query documentation.
  */
-int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
-			      uint32_t cid)
+static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+					    uint32_t cid)
 {
 	/*
 	 * A query wait without a preceding query end will
@@ -537,7 +595,7 @@
 	 * without writing to the query result structure.
 	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery body;
@@ -567,3 +625,80 @@
 
 	return 0;
 }
+
+/**
+ * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * guest-backed resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * See the vmw_fifo_emit_dummy_query documentation.
+ */
+static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
+					uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForGBQuery body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of fifo space for dummy query.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.offset = 0;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
+ * appropriate resource query commands.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A Query results structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object. And that buffer object
+ * must also be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+			      uint32_t cid)
+{
+	if (dev_priv->has_mob)
+		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+
+	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+}
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}