--- zzzz-none-000/linux-3.10.107/drivers/usb/host/xhci-ring.c	2017-06-27 09:49:32.000000000 +0000
+++ vr9-7490-729/linux-3.10.107/drivers/usb/host/xhci-ring.c	2021-11-10 11:53:55.000000000 +0000
@@ -555,6 +555,7 @@
 	struct xhci_generic_trb *trb;
 	struct xhci_ep_ctx *ep_ctx;
 	dma_addr_t addr;
+	u64 hw_dequeue;
 
 	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
 			ep_index, stream_id);
@@ -564,52 +565,54 @@
 				stream_id);
 		return;
 	}
-	state->new_cycle_state = 0;
-	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
-	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-			dev->eps[ep_index].stopped_trb,
-			&state->new_cycle_state);
-	if (!state->new_deq_seg) {
-		WARN_ON(1);
-		return;
-	}
 
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+	hw_dequeue = le64_to_cpu(ep_ctx->deq);
+
+	/* Find virtual address and segment of hardware dequeue pointer */
+	state->new_deq_seg = ep_ring->deq_seg;
+	state->new_deq_ptr = ep_ring->dequeue;
+	while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+			!= (dma_addr_t)(hw_dequeue & ~0xf)) {
+		next_trb(xhci, ep_ring, &state->new_deq_seg,
+					&state->new_deq_ptr);
+		if (state->new_deq_ptr == ep_ring->dequeue) {
+			WARN_ON(1);
+			return;
+		}
+	}
+	/*
+	 * Find cycle state for last_trb, starting at old cycle state of
+	 * hw_dequeue. If there is only one segment ring, find_trb_seg() will
+	 * return immediately and cannot toggle the cycle state if this search
+	 * wraps around, so add one more toggle manually in that case.
+	 */
+	state->new_cycle_state = hw_dequeue & 0x1;
+	if (ep_ring->first_seg == ep_ring->first_seg->next &&
+			cur_td->last_trb < state->new_deq_ptr)
+		state->new_cycle_state ^= 0x1;
 
 	state->new_deq_ptr = cur_td->last_trb;
 	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
 	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
-			state->new_deq_ptr,
-			&state->new_cycle_state);
+			state->new_deq_ptr, &state->new_cycle_state);
 	if (!state->new_deq_seg) {
 		WARN_ON(1);
 		return;
 	}
 
+	/* Increment to find next TRB after last_trb. Cycle if appropriate. */
 	trb = &state->new_deq_ptr->generic;
 	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
 	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
 		state->new_cycle_state ^= 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
-	/*
-	 * If there is only one segment in a ring, find_trb_seg()'s while loop
-	 * will not run, and it will return before it has a chance to see if it
-	 * needs to toggle the cycle bit. It can't tell if the stalled transfer
-	 * ended just before the link TRB on a one-segment ring, or if the TD
-	 * wrapped around the top of the ring, because it doesn't have the TD in
-	 * question. Look for the one-segment case where stalled TRB's address
-	 * is greater than the new dequeue pointer address.
-	 */
-	if (ep_ring->first_seg == ep_ring->first_seg->next &&
-			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
-		state->new_cycle_state ^= 0x1;
+	/* Don't update the ring cycle state for the producer (us). */
 	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
 
-	/* Don't update the ring cycle state for the producer (us). */
 	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
 			state->new_deq_seg);
 	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
@@ -791,7 +794,6 @@
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
 		ep->stopped_td = NULL;
-		ep->stopped_trb = NULL;
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
@@ -858,11 +860,9 @@
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
 
-	/* Clear stopped_td and stopped_trb if endpoint is not halted */
-	if (!(ep->ep_state & EP_HALTED)) {
+	/* Clear stopped_td if endpoint is not halted */
+	if (!(ep->ep_state & EP_HALTED))
 		ep->stopped_td = NULL;
-		ep->stopped_trb = NULL;
-	}
 
 	/*
 	 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1431,6 +1431,11 @@
 		break;
 	case TRB_TYPE(TRB_CONFIG_EP):
 		virt_dev = xhci->devs[slot_id];
+		/* 20170502 AVM/VGJ Check whether virt_dev is null before using it */
+		if (!virt_dev) {
+			xhci_warn(xhci, "No virtual device for slot %d\n", slot_id);
+			break;
+		}
 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
 			break;
 		/*
@@ -1476,6 +1481,11 @@
 		break;
 	case TRB_TYPE(TRB_EVAL_CONTEXT):
 		virt_dev = xhci->devs[slot_id];
+		/* 20170502 AVM/VGJ Check whether virt_dev is null before using it */
+		if (!virt_dev) {
+			xhci_warn(xhci, "No virtual device for slot %d\n", slot_id);
+			break;
+		}
 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
 			break;
 		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
@@ -1828,14 +1838,12 @@
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	ep->ep_state |= EP_HALTED;
 	ep->stopped_td = td;
-	ep->stopped_trb = event_trb;
 	ep->stopped_stream = stream_id;
 
 	xhci_queue_reset_ep(xhci, slot_id, ep_index);
 	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 
 	ep->stopped_td = NULL;
-	ep->stopped_trb = NULL;
 	ep->stopped_stream = 0;
 
 	xhci_ring_cmd_db(xhci);
@@ -1917,7 +1925,6 @@
 		 * the ring dequeue pointer or take this TD off any lists yet.
 		 */
 		ep->stopped_td = td;
-		ep->stopped_trb = event_trb;
 		return 0;
 	} else {
 		if (trb_comp_code == COMP_STALL) {
@@ -1929,7 +1936,6 @@
 			 * USB class driver clear the stall later.
 			 */
 			ep->stopped_td = td;
-			ep->stopped_trb = event_trb;
 			ep->stopped_stream = ep_ring->stream_id;
 		} else if (xhci_requires_manual_halt_cleanup(xhci,
 					ep_ctx, trb_comp_code)) {
@@ -2563,7 +2569,7 @@
 		 * successful event after a short transfer.
 		 * Ignore it.
 		 */
-		if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
+		if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
 				ep_ring->last_td_was_short) {
 			ep_ring->last_td_was_short = false;
 			ret = 0;