--- zzzz-none-000/linux-3.10.107/drivers/usb/host/xhci-ring.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/usb/host/xhci-ring.c	2021-02-04 17:41:59.000000000 +0000
@@ -67,10 +67,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include "xhci.h"
-
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-		struct xhci_virt_device *virt_dev,
-		struct xhci_event_cmd *event);
+#include "xhci-trace.h"
 
 /*
  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
@@ -122,16 +119,6 @@
 	return TRB_TYPE_LINK_LE32(link->control);
 }
 
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
-{
-	/* Enqueue pointer can be left pointing to the link TRB,
-	 * we must handle that
-	 */
-	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
-		return ring->enq_seg->next->trbs;
-	return ring->enqueue;
-}
-
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  * affect the ring dequeue or enqueue pointers.
@@ -155,8 +142,6 @@
  */
 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
-	unsigned long long addr;
-
 	ring->deq_updates++;
 
 	/*
@@ -177,7 +162,7 @@
 		if (ring->type == TYPE_EVENT &&
				last_trb_on_last_seg(xhci, ring,
					ring->deq_seg, ring->dequeue)) {
-			ring->cycle_state = (ring->cycle_state ? 0 : 1);
+			ring->cycle_state ^= 1;
 		}
 		ring->deq_seg = ring->deq_seg->next;
 		ring->dequeue = ring->deq_seg->trbs;
@@ -185,8 +170,6 @@
 			ring->dequeue++;
 		}
 	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
-
-	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
 }
 
 /*
@@ -211,7 +194,6 @@
 {
 	u32 chain;
 	union xhci_trb *next;
-	unsigned long long addr;
 
 	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 	/* If this is not event ring, there is one less usable TRB */
@@ -256,14 +238,13 @@
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
-				ring->cycle_state = (ring->cycle_state ? 0 : 1);
+				ring->cycle_state ^= 1;
 			}
 		}
 		ring->enq_seg = ring->enq_seg->next;
 		ring->enqueue = ring->enq_seg->trbs;
 		next = ring->enqueue;
 	}
-	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
 }
 
 /*
@@ -294,30 +275,81 @@
 		return;
 	xhci_dbg(xhci, "// Ding dong!\n");
-	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
+	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 	/* Flush PCI posted writes */
-	xhci_readl(xhci, &xhci->dba->doorbell[0]);
+	readl(&xhci->dba->doorbell[0]);
+}
+
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+{
+	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+}
+
+static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
+{
+	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
+					cmd_list);
+}
+
+/*
+ * Turn all commands on the command ring with status set to "aborted" into
+ * no-op TRBs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
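+ *
+ * As background, ownership of a TRB is arbitrated by its cycle bit:
+ * the producer toggles its cycle state each time it wraps, and a TRB
+ * belongs to the consumer only while the TRB's cycle bit matches the
+ * consumer's cycle state.  A loose stand-alone sketch of that test
+ * (illustrative names, not the driver's API):
+ *
+ *	static bool consumer_owns_trb(u32 field3, u32 consumer_cycle)
+ *	{
+ *		return (field3 & TRB_CYCLE) == consumer_cycle;
+ *	}
+ *
+ * Rewriting an aborted command in place is safe only because the ring
+ * is stopped here and the rewrite below preserves the cycle bit
+ * unchanged.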
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+					 struct xhci_command *cur_cmd)
+{
+	struct xhci_command *i_cmd;
+	u32 cycle_state;
+
+	/* Turn all aborted commands in list to no-ops, then restart */
+	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
+
+		if (i_cmd->status != COMP_CMD_ABORT)
+			continue;
+
+		i_cmd->status = COMP_CMD_STOP;
+
+		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+			 i_cmd->command_trb);
+		/* get cycle state from the original cmd trb */
+		cycle_state = le32_to_cpu(
+			i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+		/* modify the command trb to no-op command */
+		i_cmd->command_trb->generic.field[0] = 0;
+		i_cmd->command_trb->generic.field[1] = 0;
+		i_cmd->command_trb->generic.field[2] = 0;
+		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+		/*
+		 * caller waiting for completion is called when command
+		 * completion event is received for these no-op commands
+		 */
+	}
+
+	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+	/* ring command ring doorbell to restart the command ring */
+	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
+		xhci->current_cmd = cur_cmd;
+		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+		xhci_ring_cmd_db(xhci);
+	}
+}
 
-static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+/* Must be called with xhci->lock held, releases and acquires lock back */
+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 {
 	u64 temp_64;
 	int ret;
 
 	xhci_dbg(xhci, "Abort command ring\n");
 
-	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
-		xhci_dbg(xhci, "The command ring isn't running, "
-				"Have the command ring been stopped?\n");
-		return 0;
-	}
+	reinit_completion(&xhci->cmd_ring_stop_completion);
 
 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-	if (!(temp_64 & CMD_RING_RUNNING)) {
-		xhci_dbg(xhci, "Command ring had been stopped\n");
-		return 0;
-	}
-	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
 	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
 			&xhci->op_regs->cmd_ring);
@@ -328,83 +360,42 @@
 	 * seconds), then it should assume that there are
 	 * larger problems with the xHC and assert HCRST.
 	 */
-	ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
+	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
 			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
 	if (ret < 0) {
-		xhci_err(xhci, "Stopped the command ring failed, "
-				"maybe the host is dead\n");
-		xhci->xhc_state |= XHCI_STATE_DYING;
-		xhci_quiesce(xhci);
-		xhci_halt(xhci);
-		return -ESHUTDOWN;
+		/* we are about to kill xhci, give it one more chance */
+		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+			      &xhci->op_regs->cmd_ring);
+		udelay(1000);
+		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+				CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+		if (ret < 0) {
+			xhci_err(xhci, "Stopping the command ring failed, "
+					"maybe the host is dead\n");
+			xhci->xhc_state |= XHCI_STATE_DYING;
+			xhci_quiesce(xhci);
+			xhci_halt(xhci);
+			return -ESHUTDOWN;
+		}
 	}
-
-	return 0;
-}
-
-static int xhci_queue_cd(struct xhci_hcd *xhci,
-		struct xhci_command *command,
-		union xhci_trb *cmd_trb)
-{
-	struct xhci_cd *cd;
-	cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
-	if (!cd)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&cd->cancel_cmd_list);
-
-	cd->command = command;
-	cd->cmd_trb = cmd_trb;
-	list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
-
-	return 0;
-}
-
-/*
- * Cancel the command which has issue.
- *
- * Some commands may hang due to waiting for acknowledgement from
- * usb device. It is outside of the xHC's ability to control and
- * will cause the command ring is blocked. When it occurs software
- * should intervene to recover the command ring.
- * See Section 4.6.1.1 and 4.6.1.2
- */
-int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
-		union xhci_trb *cmd_trb)
-{
-	int retval = 0;
-	unsigned long flags;
-
+	/*
+	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+	 * but the completion event is never sent. Wait 2 secs (arbitrary
+	 * number) to handle those cases after negation of CMD_RING_RUNNING.
+	 */
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
+					  msecs_to_jiffies(2000));
 	spin_lock_irqsave(&xhci->lock, flags);
-
-	if (xhci->xhc_state & XHCI_STATE_DYING) {
-		xhci_warn(xhci, "Abort the command ring,"
-				" but the xHCI is dead.\n");
-		retval = -ESHUTDOWN;
-		goto fail;
-	}
-
-	/* queue the cmd desriptor to cancel_cmd_list */
-	retval = xhci_queue_cd(xhci, command, cmd_trb);
-	if (retval) {
-		xhci_warn(xhci, "Queuing command descriptor failed.\n");
-		goto fail;
-	}
-
-	/* abort command ring */
-	retval = xhci_abort_cmd_ring(xhci);
-	if (retval) {
-		xhci_err(xhci, "Abort command ring failed\n");
-		if (unlikely(retval == -ESHUTDOWN)) {
-			spin_unlock_irqrestore(&xhci->lock, flags);
-			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
-			xhci_dbg(xhci, "xHCI host controller is dead.\n");
-			return retval;
-		}
+	if (!ret) {
+		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
+		xhci_cleanup_command_queue(xhci);
+	} else {
+		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
 	}
-fail:
-	spin_unlock_irqrestore(&xhci->lock, flags);
-	return retval;
+	return 0;
 }
 
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
@@ -421,12 +412,11 @@
 	 * We don't want to restart any stream rings if there's a set dequeue
 	 * pointer command pending because the device can choose to start any
 	 * stream once the endpoint is on the HW schedule.
-	 * FIXME - check all the stream rings for pending cancellations.
 	 */
 	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
 		return;
-	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+	writel(DB_VALUE(ep_index, stream_id), db_addr);
 	/* The CPU has better things to do at this point than wait for a
 	 * write-posting flush.  It'll get there soon enough. */
 }
@@ -458,32 +448,6 @@
 	}
 }
 
-/*
- * Find the segment that trb is in.  Start searching in start_seg.
- * If we must move past a segment that has a link TRB with a toggle cycle state
- * bit set, then we will toggle the value pointed at by cycle_state.
- */
-static struct xhci_segment *find_trb_seg(
-		struct xhci_segment *start_seg,
-		union xhci_trb *trb, int *cycle_state)
-{
-	struct xhci_segment *cur_seg = start_seg;
-	struct xhci_generic_trb *generic_trb;
-
-	while (cur_seg->trbs > trb ||
-			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
-		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
-			*cycle_state ^= 0x1;
-		cur_seg = cur_seg->next;
-		if (cur_seg == start_seg)
-			/* Looped over the entire list.  Oops!
*/ - return NULL; - } - return cur_seg; -} - - static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id) @@ -551,10 +515,14 @@ struct xhci_dequeue_state *state) { struct xhci_virt_device *dev = xhci->devs[slot_id]; + struct xhci_virt_ep *ep = &dev->eps[ep_index]; struct xhci_ring *ep_ring; - struct xhci_generic_trb *trb; - struct xhci_ep_ctx *ep_ctx; + struct xhci_segment *new_seg; + union xhci_trb *new_deq; dma_addr_t addr; + u64 hw_dequeue; + bool cycle_found = false; + bool td_last_trb_found = false; ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, ep_index, stream_id); @@ -564,56 +532,71 @@ stream_id); return; } - state->new_cycle_state = 0; - xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); - state->new_deq_seg = find_trb_seg(cur_td->start_seg, - dev->eps[ep_index].stopped_trb, - &state->new_cycle_state); - if (!state->new_deq_seg) { - WARN_ON(1); - return; - } /* Dig out the cycle state saved by the xHC during the stop ep cmd */ - xhci_dbg(xhci, "Finding endpoint context\n"); - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); - state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); - - state->new_deq_ptr = cur_td->last_trb; - xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); - state->new_deq_seg = find_trb_seg(state->new_deq_seg, - state->new_deq_ptr, - &state->new_cycle_state); - if (!state->new_deq_seg) { - WARN_ON(1); - return; + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Finding endpoint context"); + /* 4.6.9 the css flag is written to the stream context for streams */ + if (ep->ep_state & EP_HAS_STREAMS) { + struct xhci_stream_ctx *ctx = + &ep->stream_info->stream_ctx_array[stream_id]; + hw_dequeue = le64_to_cpu(ctx->stream_ring); + } else { + struct xhci_ep_ctx *ep_ctx + = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + hw_dequeue = le64_to_cpu(ep_ctx->deq); } - trb = &state->new_deq_ptr->generic; - if (TRB_TYPE_LINK_LE32(trb->field[3]) && - (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) - state->new_cycle_state ^= 0x1; - next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); - - /* - * If there is only one segment in a ring, find_trb_seg()'s while loop - * will not run, and it will return before it has a chance to see if it - * needs to toggle the cycle bit. It can't tell if the stalled transfer - * ended just before the link TRB on a one-segment ring, or if the TD - * wrapped around the top of the ring, because it doesn't have the TD in - * question. Look for the one-segment case where stalled TRB's address - * is greater than the new dequeue pointer address. - */ - if (ep_ring->first_seg == ep_ring->first_seg->next && - state->new_deq_ptr < dev->eps[ep_index].stopped_trb) - state->new_cycle_state ^= 0x1; - xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); + new_seg = ep_ring->deq_seg; + new_deq = ep_ring->dequeue; + state->new_cycle_state = hw_dequeue & 0x1; + + /* + * We want to find the pointer, segment and cycle state of the new trb + * (the one after current TD's last_trb). We know the cycle state at + * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are + * found. 
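+	 *
+	 * Schematically, the loop below tracks two independent facts and
+	 * only stops once both hold (illustrative sketch, not the exact
+	 * driver code):
+	 *
+	 *	bool cycle_found = false, last_trb_found = false;
+	 *
+	 *	while (!cycle_found || !last_trb_found) {
+	 *		if (trb_dma(seg, trb) == (hw_dequeue & ~0xf))
+	 *			cycle_found = true;  /* cycle state valid here */
+	 *		if (trb == td->last_trb)
+	 *			last_trb_found = true;
+	 *		advance(&seg, &trb);  /* toggle on LINK_TOGGLE links */
+	 *	}
+	 *
+	 * leaving the new dequeue pointer just past the TD's last TRB, with
+	 * a matching cycle state.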
+ */ + do { + if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) + == (dma_addr_t)(hw_dequeue & ~0xf)) { + cycle_found = true; + if (td_last_trb_found) + break; + } + if (new_deq == cur_td->last_trb) + td_last_trb_found = true; + + if (cycle_found && + TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) && + new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE)) + state->new_cycle_state ^= 0x1; + + next_trb(xhci, ep_ring, &new_seg, &new_deq); + + /* Search wrapped around, bail out */ + if (new_deq == ep->ring->dequeue) { + xhci_err(xhci, "Error: Failed finding new dequeue state\n"); + state->new_deq_seg = NULL; + state->new_deq_ptr = NULL; + return; + } + + } while (!cycle_found || !td_last_trb_found); + + state->new_deq_seg = new_seg; + state->new_deq_ptr = new_deq; /* Don't update the ring cycle state for the producer (us). */ - xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Cycle state = 0x%x", state->new_cycle_state); + + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "New dequeue segment = %p (virtual)", state->new_deq_seg); addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); - xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "New dequeue pointer = 0x%llx (DMA)", (unsigned long long) addr); } @@ -641,9 +624,11 @@ if (flip_cycle) cur_trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); - xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); - xhci_dbg(xhci, "Address = %p (0x%llx dma); " - "in seg %p (0x%llx dma)\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Cancel (unchain) link TRB"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Address = %p (0x%llx dma); " + "in seg %p (0x%llx dma)", cur_trb, (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), cur_seg, @@ -661,7 +646,8 @@ cpu_to_le32(TRB_CYCLE); cur_trb->generic.field[3] |= cpu_to_le32( TRB_TYPE(TRB_TR_NOOP)); - xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "TRB to noop at offset 0x%llx", (unsigned long long) xhci_trb_virt_to_dma(cur_seg, cur_trb)); } @@ -670,37 +656,6 @@ } } -static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index, unsigned int stream_id, - struct xhci_segment *deq_seg, - union xhci_trb *deq_ptr, u32 cycle_state); - -void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - unsigned int stream_id, - struct xhci_dequeue_state *deq_state) -{ - struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; - - xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " - "new deq ptr = %p (0x%llx dma), new cycle = %u\n", - deq_state->new_deq_seg, - (unsigned long long)deq_state->new_deq_seg->dma, - deq_state->new_deq_ptr, - (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), - deq_state->new_cycle_state); - queue_set_tr_deq(xhci, slot_id, ep_index, stream_id, - deq_state->new_deq_seg, - deq_state->new_deq_ptr, - (u32) deq_state->new_cycle_state); - /* Stop the TD queueing code from ringing the doorbell until - * this command completes. The HC won't set the dequeue pointer - * if the ring is running, and ringing the doorbell starts the - * ring running. 
- */ - ep->ep_state |= SET_DEQ_PENDING; -} - static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) { @@ -715,7 +670,7 @@ /* Must be called with xhci->lock held in interrupt context */ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, - struct xhci_td *cur_td, int status, char *adjective) + struct xhci_td *cur_td, int status) { struct usb_hcd *hcd; struct urb *urb; @@ -739,7 +694,7 @@ spin_unlock(&xhci->lock); usb_hcd_giveback_urb(hcd, urb, status); - xhci_urb_free_priv(xhci, urb_priv); + xhci_urb_free_priv(urb_priv); spin_lock(&xhci->lock); } } @@ -754,12 +709,10 @@ * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain * bit cleared) so that the HW will skip over them. */ -static void handle_stopped_endpoint(struct xhci_hcd *xhci, +static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, union xhci_trb *trb, struct xhci_event_cmd *event) { - unsigned int slot_id; unsigned int ep_index; - struct xhci_virt_device *virt_dev; struct xhci_ring *ep_ring; struct xhci_virt_ep *ep; struct list_head *entry; @@ -768,15 +721,8 @@ struct xhci_dequeue_state deq_state; - if (unlikely(TRB_TO_SUSPEND_PORT( - le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) { - slot_id = TRB_TO_SLOT_ID( - le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); - virt_dev = xhci->devs[slot_id]; - if (virt_dev) - handle_cmd_in_cmd_wait_list(xhci, virt_dev, - event); - else + if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { + if (!xhci->devs[slot_id]) xhci_warn(xhci, "Stop endpoint command " "completion for disabled slot %u\n", slot_id); @@ -784,14 +730,12 @@ } memset(&deq_state, 0, sizeof(deq_state)); - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); ep = &xhci->devs[slot_id]->eps[ep_index]; if (list_empty(&ep->cancelled_td_list)) { xhci_stop_watchdog_timer_in_irq(xhci, ep); ep->stopped_td = NULL; - ep->stopped_trb = NULL; ring_doorbell_for_active_rings(xhci, slot_id, ep_index); return; } @@ -803,7 +747,8 @@ */ list_for_each(entry, &ep->cancelled_td_list) { cur_td = list_entry(entry, struct xhci_td, cancelled_td_list); - xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Removing canceled TD starting at 0x%llx (dma).", (unsigned long long)xhci_trb_virt_to_dma( cur_td->start_seg, cur_td->first_trb)); ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); @@ -848,21 +793,15 @@ /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { - xhci_queue_new_dequeue_state(xhci, - slot_id, ep_index, - ep->stopped_td->urb->stream_id, - &deq_state); + xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, + ep->stopped_td->urb->stream_id, &deq_state); xhci_ring_cmd_db(xhci); } else { /* Otherwise ring the doorbell(s) to restart queued transfers */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } - /* Clear stopped_td and stopped_trb if endpoint is not halted */ - if (!(ep->ep_state & EP_HALTED)) { - ep->stopped_td = NULL; - ep->stopped_trb = NULL; - } + ep->stopped_td = NULL; /* * Drop the lock and complete the URBs in the cancelled TD list. @@ -879,7 +818,7 @@ /* Doesn't matter what we pass for status, since the core will * just overwrite it (because the URB has been unlinked). 
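	 *
	 * The giveback itself follows the usual HCD pattern of dropping the
	 * private lock around the completion callback, since the callback
	 * may resubmit URBs; see xhci_giveback_urb_in_irq() above:
	 *
	 *	spin_unlock(&xhci->lock);
	 *	usb_hcd_giveback_urb(hcd, urb, status);
	 *	spin_lock(&xhci->lock);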
*/ - xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled"); + xhci_giveback_urb_in_irq(xhci, cur_td, 0); /* Stop processing the cancelled list if the watchdog timer is * running. @@ -891,6 +830,57 @@ /* Return to the event handler with xhci->lock re-acquired */ } +static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) +{ + struct xhci_td *cur_td; + + while (!list_empty(&ring->td_list)) { + cur_td = list_first_entry(&ring->td_list, + struct xhci_td, td_list); + list_del_init(&cur_td->td_list); + if (!list_empty(&cur_td->cancelled_td_list)) + list_del_init(&cur_td->cancelled_td_list); + xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); + } +} + +static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, + int slot_id, int ep_index) +{ + struct xhci_td *cur_td; + struct xhci_virt_ep *ep; + struct xhci_ring *ring; + + ep = &xhci->devs[slot_id]->eps[ep_index]; + if ((ep->ep_state & EP_HAS_STREAMS) || + (ep->ep_state & EP_GETTING_NO_STREAMS)) { + int stream_id; + + for (stream_id = 0; stream_id < ep->stream_info->num_streams; + stream_id++) { + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Killing URBs for slot ID %u, ep index %u, stream %u", + slot_id, ep_index, stream_id + 1); + xhci_kill_ring_urbs(xhci, + ep->stream_info->stream_rings[stream_id]); + } + } else { + ring = ep->ring; + if (!ring) + return; + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Killing URBs for slot ID %u, ep index %u", + slot_id, ep_index); + xhci_kill_ring_urbs(xhci, ring); + } + while (!list_empty(&ep->cancelled_td_list)) { + cur_td = list_first_entry(&ep->cancelled_td_list, + struct xhci_td, cancelled_td_list); + list_del_init(&cur_td->cancelled_td_list); + xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); + } +} + /* Watchdog timer function for when a stop endpoint command fails to complete. * In this case, we assume the host controller is broken or dying or dead. 
The * host may still be completing some other events, so we have to be careful to @@ -914,9 +904,6 @@ { struct xhci_hcd *xhci; struct xhci_virt_ep *ep; - struct xhci_virt_ep *temp_ep; - struct xhci_ring *ring; - struct xhci_td *cur_td; int ret, i, j; unsigned long flags; @@ -926,15 +913,10 @@ spin_lock_irqsave(&xhci->lock, flags); ep->stop_cmds_pending--; - if (xhci->xhc_state & XHCI_STATE_DYING) { - xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " - "xHCI as DYING, exiting.\n"); - spin_unlock_irqrestore(&xhci->lock, flags); - return; - } if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { - xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " - "exiting.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Stop EP timer ran, but no command pending, " + "exiting."); spin_unlock_irqrestore(&xhci->lock, flags); return; } @@ -971,38 +953,15 @@ for (i = 0; i < MAX_HC_SLOTS; i++) { if (!xhci->devs[i]) continue; - for (j = 0; j < 31; j++) { - temp_ep = &xhci->devs[i]->eps[j]; - ring = temp_ep->ring; - if (!ring) - continue; - xhci_dbg(xhci, "Killing URBs for slot ID %u, " - "ep index %u\n", i, j); - while (!list_empty(&ring->td_list)) { - cur_td = list_first_entry(&ring->td_list, - struct xhci_td, - td_list); - list_del_init(&cur_td->td_list); - if (!list_empty(&cur_td->cancelled_td_list)) - list_del_init(&cur_td->cancelled_td_list); - xhci_giveback_urb_in_irq(xhci, cur_td, - -ESHUTDOWN, "killed"); - } - while (!list_empty(&temp_ep->cancelled_td_list)) { - cur_td = list_first_entry( - &temp_ep->cancelled_td_list, - struct xhci_td, - cancelled_td_list); - list_del_init(&cur_td->cancelled_td_list); - xhci_giveback_urb_in_irq(xhci, cur_td, - -ESHUTDOWN, "killed"); - } - } + for (j = 0; j < 31; j++) + xhci_kill_endpoint_urbs(xhci, i, j); } spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "Calling usb_hc_died()\n"); - usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); - xhci_dbg(xhci, "xHCI host controller is dead.\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Calling usb_hc_died()"); + usb_hc_died(xhci_to_hcd(xhci)); + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "xHCI host controller is dead."); } @@ -1060,63 +1019,58 @@ * endpoint doorbell to restart the ring, but only if there aren't more * cancellations pending. */ -static void handle_set_deq_completion(struct xhci_hcd *xhci, - struct xhci_event_cmd *event, - union xhci_trb *trb) +static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, + union xhci_trb *trb, u32 cmd_comp_code) { - unsigned int slot_id; unsigned int ep_index; unsigned int stream_id; struct xhci_ring *ep_ring; struct xhci_virt_device *dev; + struct xhci_virt_ep *ep; struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); dev = xhci->devs[slot_id]; + ep = &dev->eps[ep_index]; ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); if (!ep_ring) { - xhci_warn(xhci, "WARN Set TR deq ptr command for " - "freed stream ID %u\n", + xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", stream_id); /* XXX: Harmless??? 
*/ - dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; - return; + goto cleanup; } ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); - if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) { + if (cmd_comp_code != COMP_SUCCESS) { unsigned int ep_state; unsigned int slot_state; - switch (GET_COMP_CODE(le32_to_cpu(event->status))) { + switch (cmd_comp_code) { case COMP_TRB_ERR: - xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because " - "of stream ID configuration\n"); + xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n"); break; case COMP_CTX_STATE: - xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " - "to incorrect slot or ep state.\n"); + xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); ep_state = le32_to_cpu(ep_ctx->ep_info); ep_state &= EP_STATE_MASK; slot_state = le32_to_cpu(slot_ctx->dev_state); slot_state = GET_SLOT_STATE(slot_state); - xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Slot state = %u, EP state = %u", slot_state, ep_state); break; case COMP_EBADSLT: - xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because " - "slot %u was not enabled.\n", slot_id); + xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n", + slot_id); break; default: - xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " - "completion code of %u.\n", - GET_COMP_CODE(le32_to_cpu(event->status))); + xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n", + cmd_comp_code); break; } /* OK what do we do now? The endpoint state is hosed, and we @@ -1126,25 +1080,32 @@ * cancelling URBs, which might not be an error... */ } else { - xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", - le64_to_cpu(ep_ctx->deq)); - if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, - dev->eps[ep_index].queued_deq_ptr) == - (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) { + u64 deq; + /* 4.6.10 deq ptr is written to the stream ctx for streams */ + if (ep->ep_state & EP_HAS_STREAMS) { + struct xhci_stream_ctx *ctx = + &ep->stream_info->stream_ctx_array[stream_id]; + deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; + } else { + deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; + } + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq); + if (xhci_trb_virt_to_dma(ep->queued_deq_seg, + ep->queued_deq_ptr) == deq) { /* Update the ring's dequeue segment and dequeue pointer * to reflect the new position. 
*/ update_ring_for_set_deq_completion(xhci, dev, ep_ring, ep_index); } else { - xhci_warn(xhci, "Mismatch between completed Set TR Deq " - "Ptr command & xHCI internal state.\n"); + xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n"); xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", - dev->eps[ep_index].queued_deq_seg, - dev->eps[ep_index].queued_deq_ptr); + ep->queued_deq_seg, ep->queued_deq_ptr); } } +cleanup: dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; dev->eps[ep_index].queued_deq_seg = NULL; dev->eps[ep_index].queued_deq_ptr = NULL; @@ -1152,28 +1113,32 @@ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } -static void handle_reset_ep_completion(struct xhci_hcd *xhci, - struct xhci_event_cmd *event, - union xhci_trb *trb) +static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, + union xhci_trb *trb, u32 cmd_comp_code) { - int slot_id; unsigned int ep_index; - slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3])); ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); /* This command will only fail if the endpoint wasn't halted, * but we don't care. */ - xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", - GET_COMP_CODE(le32_to_cpu(event->status))); + xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, + "Ignoring reset ep completion code of %u", cmd_comp_code); /* HW with the reset endpoint quirk needs to have a configure endpoint * command complete before the endpoint can be used. Queue that here * because the HW can't handle two commands being queued in a row. */ if (xhci->quirks & XHCI_RESET_EP_QUIRK) { - xhci_dbg(xhci, "Queueing configure endpoint command\n"); - xhci_queue_configure_endpoint(xhci, + struct xhci_command *command; + command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); + if (!command) { + xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n"); + return; + } + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Queueing configure endpoint command"); + xhci_queue_configure_endpoint(xhci, command, xhci->devs[slot_id]->in_ctx->dma, slot_id, false); xhci_ring_cmd_db(xhci); @@ -1183,185 +1148,180 @@ } } -/* Complete the command and detele it from the devcie's command queue. - */ -static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, - struct xhci_command *command, u32 status) +static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, + u32 cmd_comp_code) { - command->status = status; - list_del(&command->cmd_list); - if (command->completion) - complete(command->completion); + if (cmd_comp_code == COMP_SUCCESS) + xhci->slot_id = slot_id; else - xhci_free_command(xhci, command); + xhci->slot_id = 0; } - -/* Check to see if a command in the device's command queue matches this one. - * Signal the completion or free the command, and return 1. Return 0 if the - * completed command isn't at the head of the command list. 
- */ -static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, - struct xhci_virt_device *virt_dev, - struct xhci_event_cmd *event) +static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) { - struct xhci_command *command; - - if (list_empty(&virt_dev->cmd_list)) - return 0; - - command = list_entry(virt_dev->cmd_list.next, - struct xhci_command, cmd_list); - if (xhci->cmd_ring->dequeue != command->command_trb) - return 0; + struct xhci_virt_device *virt_dev; - xhci_complete_cmd_in_cmd_wait_list(xhci, command, - GET_COMP_CODE(le32_to_cpu(event->status))); - return 1; + virt_dev = xhci->devs[slot_id]; + if (!virt_dev) + return; + if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) + /* Delete default control endpoint resources */ + xhci_free_device_endpoint_resources(xhci, virt_dev, true); + xhci_free_virt_device(xhci, slot_id); } -/* - * Finding the command trb need to be cancelled and modifying it to - * NO OP command. And if the command is in device's command wait - * list, finishing and freeing it. - * - * If we can't find the command trb, we think it had already been - * executed. - */ -static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd) +static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, + struct xhci_event_cmd *event, u32 cmd_comp_code) { - struct xhci_segment *cur_seg; - union xhci_trb *cmd_trb; - u32 cycle_state; - - if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue) - return; + struct xhci_virt_device *virt_dev; + struct xhci_input_control_ctx *ctrl_ctx; + unsigned int ep_index; + unsigned int ep_state; + u32 add_flags, drop_flags; - /* find the current segment of command ring */ - cur_seg = find_trb_seg(xhci->cmd_ring->first_seg, - xhci->cmd_ring->dequeue, &cycle_state); - - if (!cur_seg) { - xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n", - xhci->cmd_ring->dequeue, - (unsigned long long) - xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, - xhci->cmd_ring->dequeue)); - xhci_debug_ring(xhci, xhci->cmd_ring); - xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); + /* + * Configure endpoint commands can come from the USB core + * configuration or alt setting changes, or because the HW + * needed an extra configure endpoint command after a reset + * endpoint command or streams were being configured. + * If the command was for a halted endpoint, the xHCI driver + * is not waiting on the configure endpoint command. + */ + virt_dev = xhci->devs[slot_id]; + ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); + if (!ctrl_ctx) { + xhci_warn(xhci, "Could not get input context, bad type.\n"); return; } - /* find the command trb matched by cd from command ring */ - for (cmd_trb = xhci->cmd_ring->dequeue; - cmd_trb != xhci->cmd_ring->enqueue; - next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) { - /* If the trb is link trb, continue */ - if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3])) - continue; - - if (cur_cd->cmd_trb == cmd_trb) { - - /* If the command in device's command list, we should - * finish it and free the command structure. 
- */ - if (cur_cd->command) - xhci_complete_cmd_in_cmd_wait_list(xhci, - cur_cd->command, COMP_CMD_STOP); - - /* get cycle state from the origin command trb */ - cycle_state = le32_to_cpu(cmd_trb->generic.field[3]) - & TRB_CYCLE; - - /* modify the command trb to NO OP command */ - cmd_trb->generic.field[0] = 0; - cmd_trb->generic.field[1] = 0; - cmd_trb->generic.field[2] = 0; - cmd_trb->generic.field[3] = cpu_to_le32( - TRB_TYPE(TRB_CMD_NOOP) | cycle_state); - break; - } + add_flags = le32_to_cpu(ctrl_ctx->add_flags); + drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); + /* Input ctx add_flags are the endpoint index plus one */ + ep_index = xhci_last_valid_endpoint(add_flags) - 1; + + /* A usb_set_interface() call directly after clearing a halted + * condition may race on this quirky hardware. Not worth + * worrying about, since this is prototype hardware. Not sure + * if this will work for streams, but streams support was + * untested on this prototype. + */ + if (xhci->quirks & XHCI_RESET_EP_QUIRK && + ep_index != (unsigned int) -1 && + add_flags - SLOT_FLAG == drop_flags) { + ep_state = virt_dev->eps[ep_index].ep_state; + if (!(ep_state & EP_HALTED)) + return; + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "Completed config ep cmd - " + "last ep index = %d, state = %d", + ep_index, ep_state); + /* Clear internal halted state and restart ring(s) */ + virt_dev->eps[ep_index].ep_state &= ~EP_HALTED; + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + return; } + return; } -static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci) +static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id, + struct xhci_event_cmd *event) { - struct xhci_cd *cur_cd, *next_cd; + xhci_dbg(xhci, "Completed reset device command.\n"); + if (!xhci->devs[slot_id]) + xhci_warn(xhci, "Reset device command completion " + "for disabled slot %u\n", slot_id); +} - if (list_empty(&xhci->cancel_cmd_list)) +static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, + struct xhci_event_cmd *event) +{ + if (!(xhci->quirks & XHCI_NEC_HOST)) { + xhci->error_bitmask |= 1 << 6; return; - - list_for_each_entry_safe(cur_cd, next_cd, - &xhci->cancel_cmd_list, cancel_cmd_list) { - xhci_cmd_to_noop(xhci, cur_cd); - list_del(&cur_cd->cancel_cmd_list); - kfree(cur_cd); } + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, + "NEC firmware version %2x.%02x", + NEC_FW_MAJOR(le32_to_cpu(event->status)), + NEC_FW_MINOR(le32_to_cpu(event->status))); } -/* - * traversing the cancel_cmd_list. If the command descriptor according - * to cmd_trb is found, the function free it and return 1, otherwise - * return 0. 
- */ -static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci, - union xhci_trb *cmd_trb) +static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status) { - struct xhci_cd *cur_cd, *next_cd; - - if (list_empty(&xhci->cancel_cmd_list)) - return 0; + list_del(&cmd->cmd_list); - list_for_each_entry_safe(cur_cd, next_cd, - &xhci->cancel_cmd_list, cancel_cmd_list) { - if (cur_cd->cmd_trb == cmd_trb) { - if (cur_cd->command) - xhci_complete_cmd_in_cmd_wait_list(xhci, - cur_cd->command, COMP_CMD_STOP); - list_del(&cur_cd->cancel_cmd_list); - kfree(cur_cd); - return 1; - } + if (cmd->completion) { + cmd->status = status; + complete(cmd->completion); + } else { + kfree(cmd); } +} - return 0; +void xhci_cleanup_command_queue(struct xhci_hcd *xhci) +{ + struct xhci_command *cur_cmd, *tmp_cmd; + list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) + xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT); } -/* - * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the - * trb pointed by the command ring dequeue pointer is the trb we want to - * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will - * traverse the cancel_cmd_list to trun the all of the commands according - * to command descriptor to NO-OP trb. - */ -static int handle_stopped_cmd_ring(struct xhci_hcd *xhci, - int cmd_trb_comp_code) +void xhci_handle_command_timeout(struct work_struct *work) { - int cur_trb_is_good = 0; + struct xhci_hcd *xhci; + int ret; + unsigned long flags; + u64 hw_ring_state; + + xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer); + + spin_lock_irqsave(&xhci->lock, flags); - /* Searching the cmd trb pointed by the command ring dequeue - * pointer in command descriptor list. If it is found, free it. + /* + * If timeout work is pending, or current_cmd is NULL, it means we + * raced with command completion. Command is handled so just return. */ - cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci, - xhci->cmd_ring->dequeue); + if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) { + spin_unlock_irqrestore(&xhci->lock, flags); + return; + } + /* mark this command to be cancelled */ + xhci->current_cmd->status = COMP_CMD_ABORT; - if (cmd_trb_comp_code == COMP_CMD_ABORT) - xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; - else if (cmd_trb_comp_code == COMP_CMD_STOP) { - /* traversing the cancel_cmd_list and canceling - * the command according to command descriptor - */ - xhci_cancel_cmd_in_cd_list(xhci); + /* Make sure command ring is running before aborting it */ + hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && + (hw_ring_state & CMD_RING_RUNNING)) { + /* Prevent new doorbell, and start command abort */ + xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; + xhci_dbg(xhci, "Command timeout\n"); + ret = xhci_abort_cmd_ring(xhci, flags); + if (unlikely(ret == -ESHUTDOWN)) { + xhci_err(xhci, "Abort command ring failed\n"); + xhci_cleanup_command_queue(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); + xhci_dbg(xhci, "xHCI host controller is dead.\n"); - xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; - /* - * ring command ring doorbell again to restart the - * command ring - */ - if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) - xhci_ring_cmd_db(xhci); + return; + } + + goto time_out_completed; } - return cur_trb_is_good; + + /* host removed. 
Bail out */ + if (xhci->xhc_state & XHCI_STATE_REMOVING) { + xhci_dbg(xhci, "host removed, ring start fail?\n"); + xhci_cleanup_command_queue(xhci); + + goto time_out_completed; + } + + /* command timeout on stopped ring, ring can't be aborted */ + xhci_dbg(xhci, "Command timeout on stopped ring\n"); + xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); + +time_out_completed: + spin_unlock_irqrestore(&xhci->lock, flags); + return; } static void handle_cmd_completion(struct xhci_hcd *xhci, @@ -1370,15 +1330,15 @@ int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); u64 cmd_dma; dma_addr_t cmd_dequeue_dma; - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_virt_device *virt_dev; - unsigned int ep_index; - struct xhci_ring *ep_ring; - unsigned int ep_state; + u32 cmd_comp_code; + union xhci_trb *cmd_trb; + struct xhci_command *cmd; + u32 cmd_type; cmd_dma = le64_to_cpu(event->cmd_trb); + cmd_trb = xhci->cmd_ring->dequeue; cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, - xhci->cmd_ring->dequeue); + cmd_trb); /* Is the command ring deq ptr out of sync with the deq seg ptr? */ if (cmd_dequeue_dma == 0) { xhci->error_bitmask |= 1 << 4; @@ -1390,137 +1350,107 @@ return; } - if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) || - (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) { - /* If the return value is 0, we think the trb pointed by - * command ring dequeue pointer is a good trb. The good - * trb means we don't want to cancel the trb, but it have - * been stopped by host. So we should handle it normally. - * Otherwise, driver should invoke inc_deq() and return. - */ - if (handle_stopped_cmd_ring(xhci, - GET_COMP_CODE(le32_to_cpu(event->status)))) { - inc_deq(xhci, xhci->cmd_ring); - return; + cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); + + cancel_delayed_work(&xhci->cmd_timer); + + trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); + + cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); + + /* If CMD ring stopped we own the trbs between enqueue and dequeue */ + if (cmd_comp_code == COMP_CMD_STOP) { + complete_all(&xhci->cmd_ring_stop_completion); + return; + } + + if (cmd->command_trb != xhci->cmd_ring->dequeue) { + xhci_err(xhci, + "Command completion event does not match command\n"); + return; + } + + /* + * Host aborted the command ring, check if the current command was + * supposed to be aborted, otherwise continue normally. + * The command ring is stopped now, but the xHC will issue a Command + * Ring Stopped event which will cause us to restart it. 
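+	 *
+	 * Putting the pieces together, the recovery sequence is roughly:
+	 *
+	 *	timeout worker: CMD_RING_STATE_RUNNING -> CMD_RING_STATE_ABORTED
+	 *	                (writes CMD_RING_ABORT, waits for the stop event)
+	 *	COMP_CMD_ABORT: CMD_RING_STATE_ABORTED -> CMD_RING_STATE_STOPPED
+	 *	COMP_CMD_STOP:  completes cmd_ring_stop_completion, after which
+	 *	                xhci_handle_stopped_cmd_ring() no-ops the aborted
+	 *	                TRBs and rings the doorbell to go back to RUNNING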
+ */ + if (cmd_comp_code == COMP_CMD_ABORT) { + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + if (cmd->status == COMP_CMD_ABORT) { + if (xhci->current_cmd == cmd) + xhci->current_cmd = NULL; + goto event_handled; } - /* There is no command to handle if we get a stop event when the - * command ring is empty, event->cmd_trb points to the next - * unset command - */ - if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue) - return; } - switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) - & TRB_TYPE_BITMASK) { - case TRB_TYPE(TRB_ENABLE_SLOT): - if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS) - xhci->slot_id = slot_id; - else - xhci->slot_id = 0; - complete(&xhci->addr_dev); + cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); + switch (cmd_type) { + case TRB_ENABLE_SLOT: + xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code); break; - case TRB_TYPE(TRB_DISABLE_SLOT): - if (xhci->devs[slot_id]) { - if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) - /* Delete default control endpoint resources */ - xhci_free_device_endpoint_resources(xhci, - xhci->devs[slot_id], true); - xhci_free_virt_device(xhci, slot_id); - } + case TRB_DISABLE_SLOT: + xhci_handle_cmd_disable_slot(xhci, slot_id); break; - case TRB_TYPE(TRB_CONFIG_EP): - virt_dev = xhci->devs[slot_id]; - if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) - break; - /* - * Configure endpoint commands can come from the USB core - * configuration or alt setting changes, or because the HW - * needed an extra configure endpoint command after a reset - * endpoint command or streams were being configured. - * If the command was for a halted endpoint, the xHCI driver - * is not waiting on the configure endpoint command. - */ - ctrl_ctx = xhci_get_input_control_ctx(xhci, - virt_dev->in_ctx); - /* Input ctx add_flags are the endpoint index plus one */ - ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1; - /* A usb_set_interface() call directly after clearing a halted - * condition may race on this quirky hardware. Not worth - * worrying about, since this is prototype hardware. Not sure - * if this will work for streams, but streams support was - * untested on this prototype. 
- */ - if (xhci->quirks & XHCI_RESET_EP_QUIRK && - ep_index != (unsigned int) -1 && - le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG == - le32_to_cpu(ctrl_ctx->drop_flags)) { - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; - ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; - if (!(ep_state & EP_HALTED)) - goto bandwidth_change; - xhci_dbg(xhci, "Completed config ep cmd - " - "last ep index = %d, state = %d\n", - ep_index, ep_state); - /* Clear internal halted state and restart ring(s) */ - xhci->devs[slot_id]->eps[ep_index].ep_state &= - ~EP_HALTED; - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); - break; - } -bandwidth_change: - xhci_dbg(xhci, "Completed config ep cmd\n"); - xhci->devs[slot_id]->cmd_status = - GET_COMP_CODE(le32_to_cpu(event->status)); - complete(&xhci->devs[slot_id]->cmd_completion); - break; - case TRB_TYPE(TRB_EVAL_CONTEXT): - virt_dev = xhci->devs[slot_id]; - if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) - break; - xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); - complete(&xhci->devs[slot_id]->cmd_completion); + case TRB_CONFIG_EP: + if (!cmd->completion) + xhci_handle_cmd_config_ep(xhci, slot_id, event, + cmd_comp_code); break; - case TRB_TYPE(TRB_ADDR_DEV): - xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status)); - complete(&xhci->addr_dev); + case TRB_EVAL_CONTEXT: break; - case TRB_TYPE(TRB_STOP_RING): - handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event); + case TRB_ADDR_DEV: break; - case TRB_TYPE(TRB_SET_DEQ): - handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue); + case TRB_STOP_RING: + WARN_ON(slot_id != TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3]))); + xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event); break; - case TRB_TYPE(TRB_CMD_NOOP): + case TRB_SET_DEQ: + WARN_ON(slot_id != TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3]))); + xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); break; - case TRB_TYPE(TRB_RESET_EP): - handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); + case TRB_CMD_NOOP: + /* Is this an aborted command turned to NO-OP? */ + if (cmd->status == COMP_CMD_STOP) + cmd_comp_code = COMP_CMD_STOP; break; - case TRB_TYPE(TRB_RESET_DEV): - xhci_dbg(xhci, "Completed reset device command.\n"); + case TRB_RESET_EP: + WARN_ON(slot_id != TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3]))); + xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); + break; + case TRB_RESET_DEV: + /* SLOT_ID field in reset device cmd completion event TRB is 0. 
+ * Use the SLOT_ID from the command TRB instead (xhci 4.6.11) + */ slot_id = TRB_TO_SLOT_ID( - le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])); - virt_dev = xhci->devs[slot_id]; - if (virt_dev) - handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); - else - xhci_warn(xhci, "Reset device command completion " - "for disabled slot %u\n", slot_id); + le32_to_cpu(cmd_trb->generic.field[3])); + xhci_handle_cmd_reset_dev(xhci, slot_id, event); break; - case TRB_TYPE(TRB_NEC_GET_FW): - if (!(xhci->quirks & XHCI_NEC_HOST)) { - xhci->error_bitmask |= 1 << 6; - break; - } - xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", - NEC_FW_MAJOR(le32_to_cpu(event->status)), - NEC_FW_MINOR(le32_to_cpu(event->status))); + case TRB_NEC_GET_FW: + xhci_handle_cmd_nec_get_fw(xhci, event); break; default: /* Skip over unknown commands on the event ring */ xhci->error_bitmask |= 1 << 6; break; } + + /* restart timer if this wasn't the last command */ + if (cmd->cmd_list.next != &xhci->cmd_list) { + xhci->current_cmd = list_entry(cmd->cmd_list.next, + struct xhci_command, cmd_list); + xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); + } else if (xhci->current_cmd == cmd) { + xhci->current_cmd = NULL; + } + +event_handled: + xhci_complete_del_and_free_cmd(cmd, cmd_comp_code); + inc_deq(xhci, xhci->cmd_ring); } @@ -1567,7 +1497,7 @@ * 1.1 ports are under the USB 2.0 hub. If the port speed * matches the device speed, it's a similar speed port. */ - if ((port_speed == 0x03) == (hcd->speed == HCD_USB3)) + if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3)) num_similar_speed_ports++; } return num_similar_speed_ports; @@ -1579,7 +1509,7 @@ u32 slot_id; struct usb_device *udev; - slot_id = TRB_TO_SLOT_ID(event->generic.field[3]); + slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); if (!xhci->devs[slot_id]) { xhci_warn(xhci, "Device Notification event for " "unused slot %u\n", slot_id); @@ -1629,7 +1559,7 @@ /* Find the right roothub. */ hcd = xhci_to_hcd(xhci); - if ((major_revision == 0x03) != (hcd->speed == HCD_USB3)) + if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3)) hcd = xhci->shared_hcd; if (major_revision == 0) { @@ -1655,7 +1585,7 @@ * correct bus_state structure. 
 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
-	if (hcd->speed == HCD_USB3)
+	if (hcd->speed >= HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
@@ -1663,25 +1593,25 @@
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);
 
-	temp = xhci_readl(xhci, port_array[faked_port_index]);
+	temp = readl(port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}
 
-	if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+	if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
 
	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
 
-		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
+		temp1 = readl(&xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}
 
-		if (DEV_SUPERSPEED(temp)) {
+		if (DEV_SUPERSPEED_ANY(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
@@ -1697,7 +1627,8 @@
			 */
			bogus_port_status = true;
			goto cleanup;
-		} else {
+		} else if (!test_bit(faked_port_index,
+				     &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
@@ -1709,7 +1640,7 @@
	}
 
	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
-			DEV_SUPERSPEED(temp)) {
+			DEV_SUPERSPEED_ANY(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
@@ -1734,7 +1665,20 @@
		}
	}
 
-	if (hcd->speed != HCD_USB3)
+	/*
+	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
+	 * RExit to a disconnect state).  If so, let the driver know it's
+	 * out of the RExit state.
+	 */
+	if (!DEV_SUPERSPEED_ANY(temp) &&
+			test_and_clear_bit(faked_port_index,
+					   &bus_state->rexit_ports)) {
+		complete(&bus_state->rexit_done[faked_port_index]);
+		bogus_port_status = true;
+		goto cleanup;
+	}
+
+	if (hcd->speed < HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);
@@ -1770,10 +1714,12 @@
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
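 *
 * The per-segment containment test reduces to DMA range arithmetic.  In
 * the easy case, where the TD's start and end TRBs sit in the same
 * segment, it is essentially:
 *
 *	in_td = (start_dma <= suspect_dma) && (suspect_dma <= end_trb_dma);
 *
 * with the comparison inverted when the TD wraps past the end of a
 * segment, as handled below.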
*/ -struct xhci_segment *trb_in_td(struct xhci_segment *start_seg, +struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, + struct xhci_segment *start_seg, union xhci_trb *start_trb, union xhci_trb *end_trb, - dma_addr_t suspect_dma) + dma_addr_t suspect_dma, + bool debug) { dma_addr_t start_dma; dma_addr_t end_seg_dma; @@ -1792,6 +1738,15 @@ /* If the end TRB isn't in this segment, this is set to 0 */ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); + if (debug) + xhci_warn(xhci, + "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n", + (unsigned long long)suspect_dma, + (unsigned long long)start_dma, + (unsigned long long)end_trb_dma, + (unsigned long long)cur_seg->dma, + (unsigned long long)end_seg_dma); + if (end_trb_dma > 0) { /* The end TRB is in this segment, so suspect should be here */ if (start_dma <= end_trb_dma) { @@ -1826,16 +1781,17 @@ struct xhci_td *td, union xhci_trb *event_trb) { struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; + struct xhci_command *command; + command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); + if (!command) + return; + ep->ep_state |= EP_HALTED; - ep->stopped_td = td; - ep->stopped_trb = event_trb; ep->stopped_stream = stream_id; - xhci_queue_reset_ep(xhci, slot_id, ep_index); - xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); + xhci_queue_reset_ep(xhci, command, slot_id, ep_index); + xhci_cleanup_stalled_ring(xhci, ep_index, td); - ep->stopped_td = NULL; - ep->stopped_trb = NULL; ep->stopped_stream = 0; xhci_ring_cmd_db(xhci); @@ -1911,82 +1867,66 @@ goto td_cleanup; if (trb_comp_code == COMP_STOP_INVAL || - trb_comp_code == COMP_STOP) { + trb_comp_code == COMP_STOP || + trb_comp_code == COMP_STOP_SHORT) { /* The Endpoint Stop Command completion will take care of any * stopped TDs. A stopped TD may be restarted, so don't update * the ring dequeue pointer or take this TD off any lists yet. */ ep->stopped_td = td; - ep->stopped_trb = event_trb; return 0; + } + if (trb_comp_code == COMP_STALL || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { + /* Issue a reset endpoint command to clear the host side + * halt, followed by a set dequeue command to move the + * dequeue pointer past the TD. + * The class driver clears the device side halt later. + */ + xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, + ep_ring->stream_id, td, event_trb); } else { - if (trb_comp_code == COMP_STALL) { - /* The transfer is completed from the driver's - * perspective, but we need to issue a set dequeue - * command for this stalled endpoint to move the dequeue - * pointer past the TD. We can't do that here because - * the halt condition must be cleared first. Let the - * USB class driver clear the stall later. - */ - ep->stopped_td = td; - ep->stopped_trb = event_trb; - ep->stopped_stream = ep_ring->stream_id; - } else if (xhci_requires_manual_halt_cleanup(xhci, - ep_ctx, trb_comp_code)) { - /* Other types of errors halt the endpoint, but the - * class driver doesn't call usb_reset_endpoint() unless - * the error is -EPIPE. Clear the halted status in the - * xHCI hardware manually. 
- */ - xhci_cleanup_halted_endpoint(xhci, - slot_id, ep_index, ep_ring->stream_id, - td, event_trb); - } else { - /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) - inc_deq(xhci, ep_ring); + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) inc_deq(xhci, ep_ring); - } + inc_deq(xhci, ep_ring); + } td_cleanup: - /* Clean up the endpoint's TD list */ - urb = td->urb; - urb_priv = urb->hcpriv; - - /* Do one last check of the actual transfer length. - * If the host controller said we transferred more data than - * the buffer length, urb->actual_length will be a very big - * number (since it's unsigned). Play it safe and say we didn't - * transfer anything. - */ - if (urb->actual_length > urb->transfer_buffer_length) { - xhci_warn(xhci, "URB transfer length is wrong, " - "xHC issue? req. len = %u, " - "act. len = %u\n", - urb->transfer_buffer_length, - urb->actual_length); - urb->actual_length = 0; - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; - } - list_del_init(&td->td_list); - /* Was this TD slated to be cancelled but completed anyway? */ - if (!list_empty(&td->cancelled_td_list)) - list_del_init(&td->cancelled_td_list); - - urb_priv->td_cnt++; - /* Giveback the urb when all the tds are completed */ - if (urb_priv->td_cnt == urb_priv->length) { - ret = 1; - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { - xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; - if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs - == 0) { - if (xhci->quirks & XHCI_AMD_PLL_FIX) - usb_amd_quirk_pll_enable(); - } + /* Clean up the endpoint's TD list */ + urb = td->urb; + urb_priv = urb->hcpriv; + + /* Do one last check of the actual transfer length. + * If the host controller said we transferred more data than the buffer + * length, urb->actual_length will be a very big number (since it's + * unsigned). Play it safe and say we didn't transfer anything. + */ + if (urb->actual_length > urb->transfer_buffer_length) { + xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n", + urb->transfer_buffer_length, + urb->actual_length); + urb->actual_length = 0; + if (td->urb->transfer_flags & URB_SHORT_NOT_OK) + *status = -EREMOTEIO; + else + *status = 0; + } + list_del_init(&td->td_list); + /* Was this TD slated to be cancelled but completed anyway? */ + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); + + urb_priv->td_cnt++; + /* Giveback the urb when all the tds are completed */ + if (urb_priv->td_cnt == urb_priv->length) { + ret = 1; + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_enable(); } } } @@ -2035,8 +1975,22 @@ else *status = 0; break; - case COMP_STOP_INVAL: + case COMP_STOP_SHORT: + if (event_trb == ep_ring->dequeue || event_trb == td->last_trb) + xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); + else + td->urb->actual_length = + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + return finish_td(xhci, td, event_trb, event, ep, status, false); case COMP_STOP: + /* Did we stop at data stage? 
*/ + if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) + td->urb->actual_length = + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + /* fall through */ + case COMP_STOP_INVAL: return finish_td(xhci, td, event_trb, event, ep, status, false); default: if (!xhci_requires_manual_halt_cleanup(xhci, @@ -2053,12 +2007,10 @@ td->urb->actual_length = td->urb->transfer_buffer_length - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - else + else if (!td->urb_length_set) td->urb->actual_length = 0; - xhci_cleanup_halted_endpoint(xhci, - slot_id, ep_index, 0, td, event_trb); - return finish_td(xhci, td, event_trb, event, ep, status, true); + return finish_td(xhci, td, event_trb, event, ep, status, false); } /* * Did we transfer any data, despite the errors that might have @@ -2132,6 +2084,8 @@ } if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) trb_comp_code = COMP_SHORT_TX; + /* fallthrough */ + case COMP_STOP_SHORT: case COMP_SHORT_TX: frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? -EREMOTEIO : 0; @@ -2167,6 +2121,10 @@ if (trb_comp_code == COMP_SUCCESS || skip_td) { frame->actual_length = frame->length; td->urb->actual_length += frame->length; + } else if (trb_comp_code == COMP_STOP_SHORT) { + frame->actual_length = + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + td->urb->actual_length += frame->actual_length; } else { for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; cur_trb != event_trb; @@ -2247,6 +2205,7 @@ *status = 0; } break; + case COMP_STOP_SHORT: case COMP_SHORT_TX: if (td->urb->transfer_flags & URB_SHORT_NOT_OK) *status = -EREMOTEIO; @@ -2263,8 +2222,20 @@ td->urb->ep->desc.bEndpointAddress, td->urb->transfer_buffer_length, EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); + /* Stopped - short packet completion */ + if (trb_comp_code == COMP_STOP_SHORT) { + td->urb->actual_length = + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + if (td->urb->transfer_buffer_length < + td->urb->actual_length) { + xhci_warn(xhci, "HC gave bad length of %d bytes txed\n", + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); + td->urb->actual_length = 0; + /* status will be set by usb core for canceled urbs */ + } /* Fast path - was this the last TRB in the TD for this URB? */ - if (event_trb == td->last_trb) { + } else if (event_trb == td->last_trb) { if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { td->urb->actual_length = td->urb->transfer_buffer_length - @@ -2419,6 +2390,9 @@ case COMP_STOP_INVAL: xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); break; + case COMP_STOP_SHORT: + xhci_dbg(xhci, "Stopped with short packet transfer detected\n"); + break; case COMP_STALL: xhci_dbg(xhci, "Stalled endpoint\n"); ep->ep_state |= EP_HALTED; @@ -2491,8 +2465,8 @@ status = 0; break; } - xhci_warn(xhci, "ERROR Unknown event condition, HC probably " - "busted\n"); + xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n", + trb_comp_code); goto cleanup; } @@ -2539,8 +2513,8 @@ td_num--; /* Is this a TRB in the currently executing TD? */ - event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, - td->last_trb, event_dma); + event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, + td->last_trb, event_dma, false); /* * Skip the Force Stopped Event. The event_trb(event_dma) of FSE @@ -2563,7 +2537,7 @@ * successful event after a short transfer. * Ignore it. 
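COMP_STOP and COMP_STOP_SHORT account length from opposite ends: for COMP_STOP the event's transfer length holds the bytes left untransferred, while for COMP_STOP_SHORT it holds the bytes already transferred. A minimal sketch of that difference, assuming event_len has already been extracted with EVENT_TRB_LEN(); the enum names are illustrative.

#include <stdint.h>

enum stop_code { STOP_REMAINING, STOP_SHORT_TRANSFERRED };

static uint32_t stopped_actual_length(enum stop_code code,
				      uint32_t buffer_len, uint32_t event_len)
{
	if (code == STOP_SHORT_TRANSFERRED)
		return event_len;		/* COMP_STOP_SHORT: bytes done */
	return buffer_len - event_len;		/* COMP_STOP: residue left */
}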
*/ - if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && + if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && ep_ring->last_td_was_short) { ep_ring->last_td_was_short = false; ret = 0; @@ -2572,7 +2546,12 @@ /* HC is busted, give up! */ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not " - "part of current TD\n"); + "part of current TD ep_index %d " + "comp_code %u\n", ep_index, + trb_comp_code); + trb_in_td(xhci, ep_ring->deq_seg, + ep_ring->dequeue, td->last_trb, + event_dma, true); return -ESHUTDOWN; } @@ -2633,17 +2612,8 @@ if (ret) { urb = td->urb; urb_priv = urb->hcpriv; - /* Leave the TD around for the reset endpoint function - * to use(but only if it's not a control endpoint, - * since we already queued the Set TR dequeue pointer - * command for stalled control endpoints). - */ - if (usb_endpoint_xfer_control(&urb->ep->desc) || - (trb_comp_code != COMP_STALL && - trb_comp_code != COMP_BABBLE)) - xhci_urb_free_priv(xhci, urb_priv); - else - kfree(urb_priv); + + xhci_urb_free_priv(urb_priv); usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); if ((urb->actual_length != urb->transfer_buffer_length && @@ -2767,7 +2737,7 @@ spin_lock(&xhci->lock); /* Check if the xHC generated the interrupt, or the irq is shared */ - status = xhci_readl(xhci, &xhci->op_regs->status); + status = readl(&xhci->op_regs->status); if (status == 0xffffffff) goto hw_died; @@ -2789,19 +2759,20 @@ * Write 1 to clear the interrupt status. */ status |= STS_EINT; - xhci_writel(xhci, status, &xhci->op_regs->status); + writel(status, &xhci->op_regs->status); /* FIXME when MSI-X is supported and there are multiple vectors */ /* Clear the MSI-X event interrupt status */ if (hcd->irq) { u32 irq_pending; /* Acknowledge the PCI interrupt */ - irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending); + irq_pending = readl(&xhci->ir_set->irq_pending); irq_pending |= IMAN_IP; - xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending); + writel(irq_pending, &xhci->ir_set->irq_pending); } - if (xhci->xhc_state & XHCI_STATE_DYING) { + if (xhci->xhc_state & XHCI_STATE_DYING || + xhci->xhc_state & XHCI_STATE_HALTED) { xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " "Shouldn't IRQs be disabled?\n"); /* Clear the event handler busy flag (RW1C); @@ -2843,7 +2814,7 @@ return IRQ_HANDLED; } -irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd) +irqreturn_t xhci_msi_irq(int irq, void *hcd) { return xhci_irq(hcd); } @@ -2917,8 +2888,8 @@ return -ENOMEM; } - xhci_dbg(xhci, "ERROR no room on ep ring, " - "try ring expansion\n"); + xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, + "ERROR no room on ep ring, try ring expansion"); num_trbs_needed = num_trbs - ep_ring->num_trbs_free; if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed, mem_flags)) { @@ -2949,7 +2920,7 @@ /* Toggle the cycle bit after the last ring segment. */ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { - ring->cycle_state = (ring->cycle_state ? 0 : 1); + ring->cycle_state ^= 1; } ring->enq_seg = ring->enq_seg->next; ring->enqueue = ring->enq_seg->trbs; @@ -3100,14 +3071,10 @@ * to set the polling interval (once the API is added). */ if (xhci_interval != ep_interval) { - if (printk_ratelimit()) - dev_dbg(&urb->dev->dev, "Driver uses different interval" - " (%d microframe%s) than xHCI " - "(%d microframe%s)\n", - ep_interval, - ep_interval == 1 ? "" : "s", - xhci_interval, - xhci_interval == 1 ? 
"" : "s"); + dev_dbg_ratelimited(&urb->dev->dev, + "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", + ep_interval, ep_interval == 1 ? "" : "s", + xhci_interval, xhci_interval == 1 ? "" : "s"); urb->interval = xhci_interval; /* Convert back to frames for LS/FS devices */ if (urb->dev->speed == USB_SPEED_LOW || @@ -3118,21 +3085,6 @@ } /* - * The TD size is the number of bytes remaining in the TD (including this TRB), - * right shifted by 10. - * It must fit in bits 21:17, so it can't be bigger than 31. - */ -static u32 xhci_td_remainder(unsigned int remainder) -{ - u32 max = (1 << (21 - 17 + 1)) - 1; - - if ((remainder >> 10) >= max) - return max << 17; - else - return (remainder >> 10) << 17; -} - -/* * For xHCI 1.0 host controllers, TD size is the number of max packet sized * packets remaining in the TD (*not* including this TRB). * @@ -3144,30 +3096,36 @@ * * TD size = total_packet_count - packets_transferred * - * It must fit in bits 21:17, so it can't be bigger than 31. + * For xHCI 0.96 and older, TD size field should be the remaining bytes + * including this TRB, right shifted by 10 + * + * For all hosts it must fit in bits 21:17, so it can't be bigger than 31. + * This is taken care of in the TRB_TD_SIZE() macro + * * The last TRB in a TD must have the TD size set to zero. */ -static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, - unsigned int total_packet_count, struct urb *urb, - unsigned int num_trbs_left) +static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, + int trb_buff_len, unsigned int td_total_len, + struct urb *urb, unsigned int num_trbs_left) { - int packets_transferred; + u32 maxp, total_packet_count; + + if (xhci->hci_version < 0x100) + return ((td_total_len - transferred) >> 10); + + maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + total_packet_count = DIV_ROUND_UP(td_total_len, maxp); /* One TRB with a zero-length data packet. */ - if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0)) + if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) || + trb_buff_len == td_total_len) return 0; - /* All the TRB queueing functions don't count the current TRB in - * running_total. - */ - packets_transferred = (running_total + trb_buff_len) / - GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); - - if ((total_packet_count - packets_transferred) > 31) - return 31 << 17; - return (total_packet_count - packets_transferred) << 17; + /* Queueing functions don't count the current TRB into transferred */ + return (total_packet_count - ((transferred + trb_buff_len) / maxp)); } + static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index) { @@ -3289,17 +3247,12 @@ } /* Set the TRB length, TD size, and interrupter fields. */ - if (xhci->hci_version < 0x100) { - remainder = xhci_td_remainder( - urb->transfer_buffer_length - - running_total); - } else { - remainder = xhci_v1_0_td_remainder(running_total, - trb_buff_len, total_packet_count, urb, - num_trbs - 1); - } + remainder = xhci_td_remainder(xhci, running_total, trb_buff_len, + urb->transfer_buffer_length, + urb, num_trbs - 1); + length_field = TRB_LEN(trb_buff_len) | - remainder | + TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); if (num_trbs > 1) @@ -3462,17 +3415,12 @@ field |= TRB_ISP; /* Set the TRB length, TD size, and interrupter fields. 
 */
-		if (xhci->hci_version < 0x100) {
-			remainder = xhci_td_remainder(
-					urb->transfer_buffer_length -
-					running_total);
-		} else {
-			remainder = xhci_v1_0_td_remainder(running_total,
-					trb_buff_len, total_packet_count, urb,
-					num_trbs - 1);
-		}
+		remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
+				urb->transfer_buffer_length,
+				urb, num_trbs - 1);
+
 		length_field = TRB_LEN(trb_buff_len) |
-			remainder |
+			TRB_TD_SIZE(remainder) |
 			TRB_INTR_TARGET(0);
 
 		if (num_trbs > 1)
@@ -3510,7 +3458,7 @@
 	struct usb_ctrlrequest *setup;
 	struct xhci_generic_trb *start_trb;
 	int start_cycle;
-	u32 field, length_field;
+	u32 field, length_field, remainder;
 	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 
@@ -3583,9 +3531,15 @@
 	else
 		field = TRB_TYPE(TRB_DATA);
 
+	remainder = xhci_td_remainder(xhci, 0,
+				urb->transfer_buffer_length,
+				urb->transfer_buffer_length,
+				urb, 1);
+
 	length_field = TRB_LEN(urb->transfer_buffer_length) |
-		xhci_td_remainder(urb->transfer_buffer_length) |
+		TRB_TD_SIZE(remainder) |
 		TRB_INTR_TARGET(0);
+
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
@@ -3648,7 +3602,7 @@
 {
 	unsigned int max_burst;
 
-	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
+	if (xhci->hci_version < 0x100 || udev->speed < USB_SPEED_SUPER)
 		return 0;
 
 	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
@@ -3674,6 +3628,7 @@
 		return 0;
 
 	switch (udev->speed) {
+	case USB_SPEED_SUPER_PLUS:
 	case USB_SPEED_SUPER:
 		/* bMaxBurst is zero based: 0 means 1 packet per burst */
 		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
@@ -3691,6 +3646,97 @@
 	}
 }
 
+/*
+ * Calculates the Frame ID field of an isochronous TRB, which identifies
+ * the target frame on which the Interval associated with this Isochronous
+ * Transfer Descriptor will start. Refer to 4.11.2.5 in the xHCI 1.1 spec.
+ *
+ * Returns the actual frame id on success, a negative value on error.
+ */
+static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
+		struct urb *urb, int index)
+{
+	int start_frame, ist, ret = 0;
+	int start_frame_id, end_frame_id, current_frame_id;
+
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		start_frame = urb->start_frame + index * urb->interval;
+	else
+		start_frame = (urb->start_frame + index * urb->interval) >> 3;
+
+	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
+	 *
+	 * If bit [3] of IST is cleared to '0', software can add a TRB no
+	 * later than IST[2:0] Microframes before that TRB is scheduled to
+	 * be executed.
+	 * If bit [3] of IST is set to '1', software can add a TRB no later
+	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
+	 */
+	ist = HCS_IST(xhci->hcs_params2) & 0x7;
+	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+		ist <<= 3;
+
+	/* Software shall not schedule an Isoch TD with a Frame ID value that
+	 * is less than the Start Frame ID or greater than the End Frame ID,
+	 * where:
+	 *
+	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
+	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
+	 *
+	 * Both the End Frame ID and Start Frame ID values are calculated
+	 * in microframes. When software determines the valid Frame ID value,
+	 * the End Frame ID value should be rounded down to the nearest Frame
+	 * boundary, and the Start Frame ID value should be rounded up to the
+	 * nearest Frame boundary.
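Decoding the Isochronous Scheduling Threshold described above: bits 2:0 carry the count and bit 3 selects frames rather than microframes, so the driver normalizes everything to microframes. A standalone sketch with the HCS_IST() mask written out inline (the low-nibble placement follows the comment above, but treat the literal mask as an assumption of this sketch).

#include <stdint.h>

/* Returns the IST normalized to microframes */
static unsigned int ist_in_microframes(uint32_t hcs_params2)
{
	unsigned int ist_field = hcs_params2 & 0xf;	/* HCS_IST() */
	unsigned int ist = ist_field & 0x7;

	if (ist_field & (1 << 3))	/* bit 3 set: value is in frames */
		ist <<= 3;		/* 1 frame = 8 microframes */
	return ist;
}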
+ */ + current_frame_id = readl(&xhci->run_regs->microframe_index); + start_frame_id = roundup(current_frame_id + ist + 1, 8); + end_frame_id = rounddown(current_frame_id + 895 * 8, 8); + + start_frame &= 0x7ff; + start_frame_id = (start_frame_id >> 3) & 0x7ff; + end_frame_id = (end_frame_id >> 3) & 0x7ff; + + xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", + __func__, index, readl(&xhci->run_regs->microframe_index), + start_frame_id, end_frame_id, start_frame); + + if (start_frame_id < end_frame_id) { + if (start_frame > end_frame_id || + start_frame < start_frame_id) + ret = -EINVAL; + } else if (start_frame_id > end_frame_id) { + if ((start_frame > end_frame_id && + start_frame < start_frame_id)) + ret = -EINVAL; + } else { + ret = -EINVAL; + } + + if (index == 0) { + if (ret == -EINVAL || start_frame == start_frame_id) { + start_frame = start_frame_id + 1; + if (urb->dev->speed == USB_SPEED_LOW || + urb->dev->speed == USB_SPEED_FULL) + urb->start_frame = start_frame; + else + urb->start_frame = start_frame << 3; + ret = 0; + } + } + + if (ret) { + xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n", + start_frame, current_frame_id, index, + start_frame_id, end_frame_id); + xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n"); + return ret; + } + + return start_frame; +} + /* This is for isoc transfer */ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index) @@ -3707,7 +3753,9 @@ u64 start_addr, addr; int i, j; bool more_trbs_coming; + struct xhci_virt_ep *xep; + xep = &xhci->devs[slot_id]->eps[ep_index]; ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; num_tds = urb->number_of_packets; @@ -3755,6 +3803,7 @@ td = urb_priv->td[i]; for (j = 0; j < trbs_per_td; j++) { + int frame_id = 0; u32 remainder = 0; field = 0; @@ -3763,8 +3812,20 @@ TRB_TLBPC(residue); /* Queue the isoc TRB */ field |= TRB_TYPE(TRB_ISOC); - /* Assume URB_ISO_ASAP is set */ - field |= TRB_SIA; + + /* Calculate Frame ID and SIA fields */ + if (!(urb->transfer_flags & URB_ISO_ASAP) && + HCC_CFC(xhci->hcc_params)) { + frame_id = xhci_get_isoc_frame_id(xhci, + urb, + i); + if (frame_id >= 0) + field |= TRB_FRAME_ID(frame_id); + else + field |= TRB_SIA; + } else + field |= TRB_SIA; + if (i == 0) { if (start_cycle == 0) field |= 0x1; @@ -3808,17 +3869,12 @@ trb_buff_len = td_remain_len; /* Set the TRB length, TD size, & interrupter fields. */ - if (xhci->hci_version < 0x100) { - remainder = xhci_td_remainder( - td_len - running_total); - } else { - remainder = xhci_v1_0_td_remainder( - running_total, trb_buff_len, - total_packet_count, urb, - (trbs_per_td - j - 1)); - } + remainder = xhci_td_remainder(xhci, running_total, + trb_buff_len, td_len, + urb, trbs_per_td - j - 1); + length_field = TRB_LEN(trb_buff_len) | - remainder | + TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); queue_trb(xhci, ep_ring, more_trbs_coming, @@ -3840,6 +3896,10 @@ } } + /* store the next frame id */ + if (HCC_CFC(xhci->hcc_params)) + xep->next_frame_id = urb->start_frame + num_tds * urb->interval; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { if (xhci->quirks & XHCI_AMD_PLL_FIX) usb_amd_quirk_pll_disable(); @@ -3876,9 +3936,9 @@ /* * Check transfer ring to guarantee there is enough room for the urb. * Update ISO URB start_frame and interval. - * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to - * update the urb->start_frame by now. 
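Frame IDs occupy an 11-bit space, so the validity window computed above has two shapes: a plain interval when the start ID is below the end ID, and a wrapped interval when the 2048-frame counter rolled over between the two bounds. A compact model of the membership test the code performs.

#include <stdbool.h>

/* Valid iff start_frame lies in [start_id, end_id] modulo 2048 */
static bool frame_id_in_window(int start_frame, int start_id, int end_id)
{
	if (start_id < end_id)			/* normal window */
		return start_frame >= start_id && start_frame <= end_id;
	if (start_id > end_id)			/* window wraps past 2047 */
		return start_frame >= start_id || start_frame <= end_id;
	return false;				/* degenerate window */
}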
- * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
+ * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
+ * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
+ * Contiguous Frame ID is not supported by HC.
  */
 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3891,8 +3951,11 @@
 	int ep_interval;
 	int num_tds, num_trbs, i;
 	int ret;
+	struct xhci_virt_ep *xep;
+	int ist;
 
 	xdev = xhci->devs[slot_id];
+	xep = &xhci->devs[slot_id]->eps[ep_index];
 	ep_ring = xdev->eps[ep_index].ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
@@ -3909,14 +3972,10 @@
 	if (ret)
 		return ret;
 
-	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
-	start_frame &= 0x3fff;
-
-	urb->start_frame = start_frame;
-	if (urb->dev->speed == USB_SPEED_LOW ||
-			urb->dev->speed == USB_SPEED_FULL)
-		urb->start_frame >>= 3;
-
+	/*
+	 * Check interval value. This should be done before we start to
+	 * calculate the start frame value.
+	 */
 	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
 	ep_interval = urb->interval;
 	/* Convert to microframes */
@@ -3927,20 +3986,52 @@
 	 * to set the polling interval (once the API is added).
 	 */
 	if (xhci_interval != ep_interval) {
-		if (printk_ratelimit())
-			dev_dbg(&urb->dev->dev, "Driver uses different interval"
-					" (%d microframe%s) than xHCI "
-					"(%d microframe%s)\n",
-					ep_interval,
-					ep_interval == 1 ? "" : "s",
-					xhci_interval,
-					xhci_interval == 1 ? "" : "s");
+		dev_dbg_ratelimited(&urb->dev->dev,
+				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
+				ep_interval, ep_interval == 1 ? "" : "s",
+				xhci_interval, xhci_interval == 1 ? "" : "s");
 		urb->interval = xhci_interval;
 		/* Convert back to frames for LS/FS devices */
 		if (urb->dev->speed == USB_SPEED_LOW ||
 				urb->dev->speed == USB_SPEED_FULL)
 			urb->interval /= 8;
 	}
+
+	/* Calculate the start frame and put it in urb->start_frame. */
+	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
+		if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+				EP_STATE_RUNNING) {
+			urb->start_frame = xep->next_frame_id;
+			goto skip_start_over;
+		}
+	}
+
+	start_frame = readl(&xhci->run_regs->microframe_index);
+	start_frame &= 0x3fff;
+	/*
+	 * Round up to the next frame and consider the time before trb really
+	 * gets scheduled by hardware.
+	 */
+	ist = HCS_IST(xhci->hcs_params2) & 0x7;
+	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+		ist <<= 3;
+	start_frame += ist + XHCI_CFC_DELAY;
+	start_frame = roundup(start_frame, 8);
+
+	/*
+	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
+	 * is greater than 8 microframes.
+	 */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL) {
+		start_frame = roundup(start_frame, urb->interval << 3);
+		urb->start_frame = start_frame >> 3;
+	} else {
+		start_frame = roundup(start_frame, urb->interval);
+		urb->start_frame = start_frame;
+	}
+
+skip_start_over:
 	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
 
 	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
@@ -3956,12 +4047,19 @@
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
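Condensing the start-frame math above: read MFINDEX, pad by IST plus the contiguous-frame-ID delay, round up to a frame boundary, then round up to the endpoint service interval, which is kept in microframes (LS/FS intervals are scaled up from frames). A sketch with an illustrative round_up_to() helper and a cfc_delay parameter standing in for XHCI_CFC_DELAY.

#include <stdbool.h>
#include <stdint.h>

static uint32_t round_up_to(uint32_t v, uint32_t mult)
{
	return ((v + mult - 1) / mult) * mult;
}

/* Returns the value the code above stores in urb->start_frame */
static uint32_t isoc_start_frame(uint32_t mfindex, uint32_t ist_uframes,
				 uint32_t cfc_delay, uint32_t interval,
				 bool low_or_full_speed)
{
	uint32_t start = (mfindex & 0x3fff) + ist_uframes + cfc_delay;

	start = round_up_to(start, 8);		/* next frame boundary */
	if (low_or_full_speed) {
		/* interval is in frames here; ESIT = interval * 8 uframes */
		start = round_up_to(start, interval << 3);
		return start >> 3;		/* start_frame is in frames */
	}
	start = round_up_to(start, interval);	/* interval in microframes */
	return start;
}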
*/ -static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, - u32 field3, u32 field4, bool command_must_succeed) +static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 field1, u32 field2, + u32 field3, u32 field4, bool command_must_succeed) { int reserved_trbs = xhci->cmd_ring_reserved_trbs; int ret; + if ((xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_HALTED)) { + xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); + return -ESHUTDOWN; + } + if (!command_must_succeed) reserved_trbs++; @@ -3974,57 +4072,71 @@ "unfailable commands failed.\n"); return ret; } + + cmd->command_trb = xhci->cmd_ring->enqueue; + list_add_tail(&cmd->cmd_list, &xhci->cmd_list); + + /* if there are no other commands queued we start the timeout timer */ + if (xhci->cmd_list.next == &cmd->cmd_list && + !delayed_work_pending(&xhci->cmd_timer)) { + xhci->current_cmd = cmd; + xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT); + } + queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, field4 | xhci->cmd_ring->cycle_state); return 0; } /* Queue a slot enable or disable request on the command ring */ -int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) +int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 trb_type, u32 slot_id) { - return queue_command(xhci, 0, 0, 0, + return queue_command(xhci, cmd, 0, 0, 0, TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false); } /* Queue an address device command TRB */ -int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, - u32 slot_id) +int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, + dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup) { - return queue_command(xhci, lower_32_bits(in_ctx_ptr), + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, - TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id), - false); + TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id) + | (setup == SETUP_CONTEXT_ONLY ? 
TRB_BSR : 0), false); } -int xhci_queue_vendor_command(struct xhci_hcd *xhci, +int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, u32 field1, u32 field2, u32 field3, u32 field4) { - return queue_command(xhci, field1, field2, field3, field4, false); + return queue_command(xhci, cmd, field1, field2, field3, field4, false); } /* Queue a reset device command TRB */ -int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id) +int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, + u32 slot_id) { - return queue_command(xhci, 0, 0, 0, + return queue_command(xhci, cmd, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), false); } /* Queue a configure endpoint command TRB */ -int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, +int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, + struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) { - return queue_command(xhci, lower_32_bits(in_ctx_ptr), + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), command_must_succeed); } /* Queue an evaluate context command TRB */ -int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, - u32 slot_id, bool command_must_succeed) +int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, + dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) { - return queue_command(xhci, lower_32_bits(in_ctx_ptr), + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), command_must_succeed); @@ -4034,60 +4146,93 @@ * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop * activity on an endpoint that is about to be suspended. */ -int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index, int suspend) +int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, + int slot_id, unsigned int ep_index, int suspend) { u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); u32 type = TRB_TYPE(TRB_STOP_RING); u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend); - return queue_command(xhci, 0, 0, 0, + return queue_command(xhci, cmd, 0, 0, 0, trb_slot_id | trb_ep_index | type | trb_suspend, false); } -/* Set Transfer Ring Dequeue Pointer command. - * This should not be used for endpoints that have streams enabled. 
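The queue_* wrappers above differ only in how they pack the command TRB's fourth dword before queue_command() ORs in the cycle bit. A sketch of the Stop Endpoint packing with the shifts written out; the bit positions mirror the driver's TRB_TYPE(), EP_ID_FOR_TRB() and SLOT_ID_FOR_TRB() macros and should be treated as assumptions of this sketch.

#include <stdint.h>

/*
 * Field-4 layout assumed here:
 *   TRB type -> bits 15:10, EP ID -> bits 20:16 (ep_index + 1),
 *   slot ID  -> bits 31:24
 */
static uint32_t stop_ep_trb_field4(uint32_t slot_id, uint32_t ep_index,
				   uint32_t trb_stop_ring_type)
{
	uint32_t type = trb_stop_ring_type << 10;
	uint32_t ep_id = ((ep_index + 1) & 0x1f) << 16;
	uint32_t slot = (slot_id & 0xff) << 24;

	return slot | ep_id | type;	/* cycle bit is OR'd in later */
}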
- */ -static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index, unsigned int stream_id, - struct xhci_segment *deq_seg, - union xhci_trb *deq_ptr, u32 cycle_state) +/* Set Transfer Ring Dequeue Pointer command */ +void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + unsigned int stream_id, + struct xhci_dequeue_state *deq_state) { dma_addr_t addr; u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); + u32 trb_sct = 0; u32 type = TRB_TYPE(TRB_SET_DEQ); struct xhci_virt_ep *ep; + struct xhci_command *cmd; + int ret; + + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u", + deq_state->new_deq_seg, + (unsigned long long)deq_state->new_deq_seg->dma, + deq_state->new_deq_ptr, + (unsigned long long)xhci_trb_virt_to_dma( + deq_state->new_deq_seg, deq_state->new_deq_ptr), + deq_state->new_cycle_state); - addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); + addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, + deq_state->new_deq_ptr); if (addr == 0) { xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", - deq_seg, deq_ptr); - return 0; + deq_state->new_deq_seg, deq_state->new_deq_ptr); + return; } ep = &xhci->devs[slot_id]->eps[ep_index]; if ((ep->ep_state & SET_DEQ_PENDING)) { xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); - return 0; + return; } - ep->queued_deq_seg = deq_seg; - ep->queued_deq_ptr = deq_ptr; - return queue_command(xhci, lower_32_bits(addr) | cycle_state, - upper_32_bits(addr), trb_stream_id, - trb_slot_id | trb_ep_index | type, false); + + /* This function gets called from contexts where it cannot sleep */ + cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); + if (!cmd) { + xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n"); + return; + } + + ep->queued_deq_seg = deq_state->new_deq_seg; + ep->queued_deq_ptr = deq_state->new_deq_ptr; + if (stream_id) + trb_sct = SCT_FOR_TRB(SCT_PRI_TR); + ret = queue_command(xhci, cmd, + lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state, + upper_32_bits(addr), trb_stream_id, + trb_slot_id | trb_ep_index | type, false); + if (ret < 0) { + xhci_free_command(xhci, cmd); + return; + } + + /* Stop the TD queueing code from ringing the doorbell until + * this command completes. The HC won't set the dequeue pointer + * if the ring is running, and ringing the doorbell starts the + * ring running. + */ + ep->ep_state |= SET_DEQ_PENDING; } -int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, - unsigned int ep_index) +int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, + int slot_id, unsigned int ep_index) { u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); u32 type = TRB_TYPE(TRB_RESET_EP); - return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type, - false); + return queue_command(xhci, cmd, 0, 0, 0, + trb_slot_id | trb_ep_index | type, false); }
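For the Set TR Dequeue Pointer command above, the 16-byte alignment of TRBs leaves the low four bits of the dequeue address free, which is where the new cycle state and, on stream endpoints, the stream context type are folded in. A sketch of the low-dword packing; the SCT_PRI_TR encoding is an assumption taken from the driver's headers.

#include <stdint.h>

/* Low dword of a Set TR Deq Ptr TRB: cycle in bit 0, SCT in bits 3:1 */
static uint32_t set_deq_trb_field1(uint64_t deq_dma, uint32_t cycle_state,
				   int has_streams)
{
	uint32_t trb_sct = has_streams ? (1u << 1) : 0;	/* SCT_PRI_TR */

	return (uint32_t)(deq_dma & 0xffffffffu) | trb_sct | cycle_state;
}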