--- zzzz-none-000/linux-2.6.32.61/drivers/usb/host/xhci-ring.c	2013-06-10 09:43:48.000000000 +0000
+++ ar10-7272-687/linux-2.6.32.61/drivers/usb/host/xhci-ring.c	2014-10-29 15:55:54.000000000 +0000
@@ -64,6 +64,7 @@
  * endpoint rings; it generates events on the event ring for these.
  */
 
+#include 
 #include <linux/scatterlist.h>
 #include "xhci.h"
 
@@ -95,7 +96,7 @@
 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
 			(seg->next == xhci->event_ring->first_seg);
 	else
-		return trb->link.control & LINK_TOGGLE;
+		return (SWAP32(trb->link.control) & LINK_TOGGLE);
 }
 
 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
@@ -108,7 +109,7 @@
 	if (ring == xhci->event_ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
-		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+		return (SWAP32(trb->link.control) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -182,9 +183,9 @@
 	union xhci_trb *next;
 	unsigned long long addr;
 
-	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+	chain = SWAP32(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 	next = ++(ring->enqueue);
-
+	
 	ring->enq_updates++;
 	/* Update the dequeue pointer further if that was a link TRB or we're at
 	 * the end of an event ring segment (which doesn't have link TRBS)
@@ -197,15 +198,15 @@
 			 * (which may mean the chain bit is cleared).
 			 */
 			if (!xhci_link_trb_quirk(xhci)) {
-				next->link.control &= ~TRB_CHAIN;
-				next->link.control |= chain;
+				next->link.control &= SWAP32(~TRB_CHAIN);
+				next->link.control |= SWAP32(chain);
 			}
 			/* Give this link TRB to the hardware */
 			wmb();
-			if (next->link.control & TRB_CYCLE)
-				next->link.control &= (u32) ~TRB_CYCLE;
+			if (next->link.control & SWAP32(TRB_CYCLE))
+				next->link.control &= SWAP32((u32) ~TRB_CYCLE);
 			else
-				next->link.control |= (u32) TRB_CYCLE;
+				next->link.control |= SWAP32((u32) TRB_CYCLE);
 		}
 		/* Toggle the cycle bit after the last ring segment. */
 		if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -350,9 +351,8 @@
 	while (cur_seg->trbs > trb ||
 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
-				TRB_TYPE(TRB_LINK) &&
-				(generic_trb->field[3] & LINK_TOGGLE))
+		if (TRB_TYPE(SWAP32(generic_trb->field[3])) == TRB_LINK &&
+				(SWAP32(generic_trb->field[3]) & LINK_TOGGLE))
 			*cycle_state = ~(*cycle_state) & 0x1;
 		cur_seg = cur_seg->next;
 		if (cur_seg == start_seg)
@@ -399,7 +399,7 @@
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-	state->new_cycle_state = 0x1 & ep_ctx->deq;
+	state->new_cycle_state = 0x1 & SWAP64(ep_ctx->deq);
 
 	state->new_deq_ptr = cur_td->last_trb;
 	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
@@ -412,8 +412,8 @@
 	}
 
 	trb = &state->new_deq_ptr->generic;
-	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
-			(trb->field[3] & LINK_TOGGLE))
+	if (TRB_TYPE(SWAP32(trb->field[3])) == TRB_LINK &&
+			(SWAP32(trb->field[3]) & LINK_TOGGLE))
 		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
@@ -451,12 +451,12 @@
 	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 			true;
 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
+		if ((SWAP32(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK) ==
 				TRB_TYPE(TRB_LINK)) {
 			/* Unchain any chained Link TRBs, but
 			 * leave the pointers intact.
 			 */
-			cur_trb->generic.field[3] &= ~TRB_CHAIN;
+			cur_trb->generic.field[3] &= SWAP32(~TRB_CHAIN);
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
@@ -469,8 +469,8 @@
 			cur_trb->generic.field[1] = 0;
 			cur_trb->generic.field[2] = 0;
 			/* Preserve only the cycle bit of this TRB */
-			cur_trb->generic.field[3] &= TRB_CYCLE;
-			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
+			cur_trb->generic.field[3] &= SWAP32(TRB_CYCLE);
+			cur_trb->generic.field[3] |= SWAP32(TRB_TYPE(TRB_TR_NOOP));
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
 					"in seg %p (0x%llx dma)\n",
 					cur_trb,
@@ -539,8 +539,8 @@
 #endif
 
 	memset(&deq_state, 0, sizeof(deq_state));
-	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
-	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	slot_id = TRB_TO_SLOT_ID(SWAP32(trb->generic.field[3]));
+	ep_index = TRB_TO_EP_INDEX(SWAP32(trb->generic.field[3]));
 	ep = &xhci->devs[slot_id]->eps[ep_index];
 	ep_ring = ep->ring;
 
@@ -639,18 +639,18 @@
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_slot_ctx *slot_ctx;
 
-	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
-	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	slot_id = TRB_TO_SLOT_ID(SWAP32(trb->generic.field[3]));
+	ep_index = TRB_TO_EP_INDEX(SWAP32(trb->generic.field[3]));
 	dev = xhci->devs[slot_id];
 	ep_ring = dev->eps[ep_index].ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
-	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
+	if (GET_COMP_CODE(SWAP32(event->status)) != COMP_SUCCESS) {
 		unsigned int ep_state;
 		unsigned int slot_state;
 
-		switch (GET_COMP_CODE(event->status)) {
+		switch (GET_COMP_CODE(SWAP32(event->status))) {
 		case COMP_TRB_ERR:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
 					"of stream ID configuration\n");
@@ -658,9 +658,9 @@
 		case COMP_CTX_STATE:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
 					"to incorrect slot or ep state.\n");
-			ep_state = ep_ctx->ep_info;
+			ep_state = SWAP32(ep_ctx->ep_info);
 			ep_state &= EP_STATE_MASK;
-			slot_state = slot_ctx->dev_state;
+			slot_state = SWAP32(slot_ctx->dev_state);
 			slot_state = GET_SLOT_STATE(slot_state);
 			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
 					slot_state, ep_state);
@@ -672,7 +672,7 @@
 		default:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
 					"completion code of %u.\n",
-					GET_COMP_CODE(event->status));
+					GET_COMP_CODE(SWAP32(event->status)));
 			break;
 		}
 		/* OK what do we do now? The endpoint state is hosed, and we
@@ -683,7 +683,7 @@
 		 */
 	} else {
 		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
-			ep_ctx->deq);
+			SWAP64(ep_ctx->deq));
 	}
 
 	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
@@ -698,14 +698,14 @@
 	unsigned int ep_index;
 	struct xhci_ring *ep_ring;
 
-	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
-	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	slot_id = TRB_TO_SLOT_ID(SWAP32(trb->generic.field[3]));
+	ep_index = TRB_TO_EP_INDEX(SWAP32(trb->generic.field[3]));
 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
 	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
-			(unsigned int) GET_COMP_CODE(event->status));
+			(unsigned int) GET_COMP_CODE(SWAP32(event->status)));
 
 	/* HW with the reset endpoint quirk needs to have a configure endpoint
 	 * command complete before the endpoint can be used. Queue that here
@@ -743,7 +743,7 @@
 		return 0;
 
 	command->status =
-		GET_COMP_CODE(event->status);
+		GET_COMP_CODE(SWAP32(event->status));
 	list_del(&command->cmd_list);
 	if (command->completion)
 		complete(command->completion);
@@ -755,7 +755,7 @@
 static void handle_cmd_completion(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
 {
-	int slot_id = TRB_TO_SLOT_ID(event->flags);
+	int slot_id = TRB_TO_SLOT_ID(SWAP32(event->flags));
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;
 	struct xhci_input_control_ctx *ctrl_ctx;
@@ -764,7 +764,7 @@
 	struct xhci_ring *ep_ring;
 	unsigned int ep_state;
 
-	cmd_dma = event->cmd_trb;
+	cmd_dma = SWAP64(event->cmd_trb);
 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			xhci->cmd_ring->dequeue);
 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -777,9 +777,9 @@
 		xhci->error_bitmask |= 1 << 5;
 		return;
 	}
-	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+	switch (SWAP32(xhci->cmd_ring->dequeue->generic.field[3]) & TRB_TYPE_BITMASK) {
 	case TRB_TYPE(TRB_ENABLE_SLOT):
-		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
+		if (GET_COMP_CODE(SWAP32(event->status)) == COMP_SUCCESS)
 			xhci->slot_id = slot_id;
 		else
 			xhci->slot_id = 0;
@@ -803,12 +803,12 @@
 		ctrl_ctx = xhci_get_input_control_ctx(xhci,
 				virt_dev->in_ctx);
 		/* Input ctx add_flags are the endpoint index plus one */
-		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
+		ep_index = xhci_last_valid_endpoint(SWAP32(ctrl_ctx->add_flags)) - 1;
 		ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 		if (!ep_ring) {
 			/* This must have been an initial configure endpoint */
 			xhci->devs[slot_id]->cmd_status =
-				GET_COMP_CODE(event->status);
+				GET_COMP_CODE(SWAP32(event->status));
 			complete(&xhci->devs[slot_id]->cmd_completion);
 			break;
 		}
@@ -823,7 +823,7 @@
 			ring_ep_doorbell(xhci, slot_id, ep_index);
 		} else {
 			xhci->devs[slot_id]->cmd_status =
-				GET_COMP_CODE(event->status);
+				GET_COMP_CODE(SWAP32(event->status));
 			complete(&xhci->devs[slot_id]->cmd_completion);
 		}
 		break;
@@ -831,11 +831,11 @@
 		virt_dev = xhci->devs[slot_id];
 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
 			break;
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(SWAP32(event->status));
 		complete(&xhci->devs[slot_id]->cmd_completion);
 		break;
 	case TRB_TYPE(TRB_ADDR_DEV):
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(SWAP32(event->status));
 		complete(&xhci->addr_dev);
 		break;
 	case TRB_TYPE(TRB_STOP_RING):
@@ -850,6 +850,18 @@
 	case TRB_TYPE(TRB_RESET_EP):
 		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
 		break;
+/* -- 20140828 AVM/WK XHCI reset patch from newer kernel --*/
+	case TRB_TYPE(TRB_RESET_DEV):
+		xhci_dbg(xhci, "Completed reset device command.\n");
+		slot_id = TRB_TO_SLOT_ID(SWAP32(
+			xhci->cmd_ring->dequeue->generic.field[3]));
+		virt_dev = xhci->devs[slot_id];
+		if (virt_dev)
+			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+		else
+			xhci_warn(xhci, "Reset device command completion "
+					"for disabled slot %u\n", slot_id);
+		break;
 	default:
 		/* Skip over unknown commands on the event ring */
 		xhci->error_bitmask |= 1 << 6;
@@ -864,12 +876,12 @@
 	u32 port_id;
 
 	/* Port status change events always have a successful completion code */
-	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
+	if (GET_COMP_CODE(SWAP32(event->generic.field[2])) != COMP_SUCCESS) {
 		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
 		xhci->error_bitmask |= 1 << 8;
 	}
 	/* FIXME: core doesn't care about all port link state changes yet */
-	port_id = GET_PORT_ID(event->generic.field[0]);
+	port_id = GET_PORT_ID(SWAP32(event->generic.field[0]));
 	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
 
 	/* Update event ring dequeue pointer before dropping the lock */
@@ -903,8 +915,9 @@
 	cur_seg = start_seg;
 
 	do {
-		if (start_dma == 0)
+		if (start_dma == 0){
 			return 0;
+		}
 		/* We may get an event for a Link TRB in the middle of a TD */
 		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
 				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
@@ -962,7 +975,7 @@
 	u32 trb_comp_code;
 
 	xhci_dbg(xhci, "In %s\n", __func__);
-	slot_id = TRB_TO_SLOT_ID(event->flags);
+	slot_id = TRB_TO_SLOT_ID(SWAP32(event->flags));
 	xdev = xhci->devs[slot_id];
 	if (!xdev) {
 		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
@@ -970,24 +983,24 @@
 	}
 
 	/* Endpoint ID is 1 based, our index is zero based */
-	ep_index = TRB_TO_EP_ID(event->flags) - 1;
+	ep_index = TRB_TO_EP_ID(SWAP32(event->flags)) - 1;
 	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
 	ep = &xdev->eps[ep_index];
 	ep_ring = ep->ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+	if (!ep_ring || (SWAP32(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_DISABLED) {
 		xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
 		return -ENODEV;
 	}
 
-	event_dma = event->buffer;
+	event_dma = SWAP64(event->buffer);
 	/* This TRB should be in the TD at the head of this ring's TD list */
 	xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
 	if (list_empty(&ep_ring->td_list)) {
-		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
-				TRB_TO_SLOT_ID(event->flags), ep_index);
+		xhci_warn(xhci, "WARN Event TRB for slot %d ep_index %d with no TDs queued?\n",
+				TRB_TO_SLOT_ID(SWAP32(event->flags)), ep_index);
 		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+				(unsigned int) (SWAP32(event->flags) & TRB_TYPE_BITMASK)>>10);
 		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
 		urb = NULL;
 		goto cleanup;
@@ -1007,18 +1020,18 @@
 	}
 	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
 	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+			(unsigned int) (SWAP32(event->flags) & TRB_TYPE_BITMASK)>>10);
 	xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
-			lower_32_bits(event->buffer));
+			lower_32_bits(SWAP64(event->buffer)));
 	xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
-			upper_32_bits(event->buffer));
+			upper_32_bits(SWAP64(event->buffer)));
 	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
-			(unsigned int) event->transfer_len);
+			(unsigned int) SWAP32(event->transfer_len));
 	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
-			(unsigned int) event->flags);
+			(unsigned int) SWAP32(event->flags));
 
 	/* Look for common error cases */
-	trb_comp_code = GET_COMP_CODE(event->transfer_len);
+	trb_comp_code = GET_COMP_CODE(SWAP32(event->transfer_len));
 	switch (trb_comp_code) {
 	/* Skip codes that require special handling depending on
 	 * transfer type
@@ -1089,7 +1102,7 @@
 			 * endpoint anyway. Check if a babble halted the
 			 * endpoint.
 			 */
-			if (ep_ctx->ep_info != EP_STATE_HALTED)
+			if (SWAP32(ep_ctx->ep_info) != EP_STATE_HALTED)
 				break;
 			/* else fall through */
 		case COMP_STALL:
@@ -1098,7 +1111,7 @@
 					event_trb != td->last_trb)
 				td->urb->actual_length =
 					td->urb->transfer_buffer_length
-					- TRB_LEN(event->transfer_len);
+					- TRB_LEN(SWAP32(event->transfer_len));
 			else
 				td->urb->actual_length = 0;
 
@@ -1142,7 +1155,7 @@
 					/* We didn't stop on a link TRB in the middle */
 					td->urb->actual_length =
 						td->urb->transfer_buffer_length -
-						TRB_LEN(event->transfer_len);
+						TRB_LEN(SWAP32(event->transfer_len));
 					xhci_dbg(xhci, "Waiting for status stage event\n");
 					urb = NULL;
 					goto cleanup;
@@ -1185,18 +1198,19 @@
 				"%d bytes untransferred\n",
 				td->urb->ep->desc.bEndpointAddress,
 				td->urb->transfer_buffer_length,
-				TRB_LEN(event->transfer_len));
+				TRB_LEN(SWAP32(event->transfer_len)));
+
 		/* Fast path - was this the last TRB in the TD for this URB? */
 		if (event_trb == td->last_trb) {
-			if (TRB_LEN(event->transfer_len) != 0) {
+			if (TRB_LEN(SWAP32(event->transfer_len)) != 0) {
 				td->urb->actual_length =
 					td->urb->transfer_buffer_length -
-					TRB_LEN(event->transfer_len);
+					TRB_LEN(SWAP32(event->transfer_len));
 				if (td->urb->transfer_buffer_length <
 						td->urb->actual_length) {
 					xhci_warn(xhci, "HC gave bad length "
 							"of %d bytes left\n",
-							TRB_LEN(event->transfer_len));
+							TRB_LEN(SWAP32(event->transfer_len)));
 					td->urb->actual_length = 0;
 					if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
@@ -1230,20 +1244,18 @@
 			for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
 					cur_trb != event_trb;
 					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-				if ((cur_trb->generic.field[3] &
-						TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-						(cur_trb->generic.field[3] &
-						TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+				if (TRB_TYPE(SWAP32(cur_trb->generic.field[3])) != TRB_TR_NOOP &&
+						TRB_TYPE(SWAP32(cur_trb->generic.field[3])) != TRB_LINK)
 					td->urb->actual_length +=
-						TRB_LEN(cur_trb->generic.field[2]);
+						TRB_LEN(SWAP32(cur_trb->generic.field[2]));
 			}
 			/* If the ring didn't stop on a Link or No-op TRB, add
 			 * in the actual bytes transferred from the Normal TRB
 			 */
 			if (trb_comp_code != COMP_STOP_INVAL)
 				td->urb->actual_length +=
-					TRB_LEN(cur_trb->generic.field[2]) -
-					TRB_LEN(event->transfer_len);
+					TRB_LEN(SWAP32(cur_trb->generic.field[2])) -
+					TRB_LEN(SWAP32(event->transfer_len));
 		}
 	}
 	if (trb_comp_code == COMP_STOP_INVAL ||
@@ -1321,6 +1333,7 @@
 		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
 				urb, urb->actual_length, status);
 		spin_unlock(&xhci->lock);
+		urb->hcpriv = NULL;
 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
 		spin_lock(&xhci->lock);
 	}
@@ -1345,7 +1358,7 @@
 
 	event = xhci->event_ring->dequeue;
 	/* Does the HC or OS own the TRB? */
-	if ((event->event_cmd.flags & TRB_CYCLE) !=
+	if ((SWAP32(event->event_cmd.flags) & TRB_CYCLE) !=
 			xhci->event_ring->cycle_state) {
 		xhci->error_bitmask |= 1 << 2;
 		return;
@@ -1353,7 +1366,7 @@
 	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
 
 	/* FIXME: Handle more event types. */
-	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+	switch ((SWAP32(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
 	case TRB_TYPE(TRB_COMPLETION):
 		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
 		handle_cmd_completion(xhci, &event->event_cmd);
@@ -1369,10 +1382,11 @@
 		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
 		ret = handle_tx_event(xhci, &event->trans_event);
 		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
-		if (ret < 0)
+		if (ret < 0){
 			xhci->error_bitmask |= 1 << 9;
-		else
+		}else{
 			update_ptrs = 0;
+		}
 		break;
 	default:
 		xhci->error_bitmask |= 1 << 3;
@@ -1400,10 +1414,10 @@
 	struct xhci_generic_trb *trb;
 
 	trb = &ring->enqueue->generic;
-	trb->field[0] = field1;
-	trb->field[1] = field2;
-	trb->field[2] = field3;
-	trb->field[3] = field4;
+	trb->field[0] = SWAP32(field1);
+	trb->field[1] = SWAP32(field2);
+	trb->field[2] = SWAP32(field3);
+	trb->field[3] = SWAP32(field4);
 	inc_enq(xhci, ring, consumer);
 }
 
@@ -1414,6 +1428,11 @@
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
 {
+
+	#ifdef DEBUG_XHCI_FUNC
+	printk("%s\n", __func__);
+	#endif
+
 	/* Make sure the endpoint has been added to xHC schedule */
 	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
 	switch (ep_state) {
@@ -1461,7 +1480,7 @@
 	int ret;
 	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
-			ep_ctx->ep_info & EP_STATE_MASK,
+			SWAP32(ep_ctx->ep_info) & EP_STATE_MASK,
 			num_trbs, mem_flags);
 	if (ret)
 		return ret;
@@ -1483,7 +1502,7 @@
 	list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
 	(*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
 	(*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
-
+	
 	return 0;
 }
 
@@ -1535,11 +1554,11 @@
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
 	if (num_trbs != 0)
-		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
 				"TRBs, %d left\n", __func__,
 				urb->ep->desc.bEndpointAddress, num_trbs);
 	if (running_total != urb->transfer_buffer_length)
-		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
 				"queued %#x (%d), asked for %#x (%d)\n",
 				__func__,
 				urb->ep->desc.bEndpointAddress,
@@ -1557,7 +1576,7 @@
 	 * isn't reordered.
 	 */
 	wmb();
-	start_trb->field[3] |= start_cycle;
+	start_trb->field[3] |= SWAP32(start_cycle);
 	ring_ep_doorbell(xhci, slot_id, ep_index);
 }
 
@@ -1575,7 +1594,7 @@
 	int xhci_interval;
 	int ep_interval;
 
-	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
+	xhci_interval = EP_INTERVAL_TO_UFRAMES(SWAP32(ep_ctx->ep_info));
 	ep_interval = urb->interval;
 	/* Convert to microframes */
 	if (urb->dev->speed == USB_SPEED_LOW ||
@@ -1900,8 +1919,8 @@
 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
 	queue_trb(xhci, ep_ring, false,
 			/* FIXME endianness is probably going to bite my ass here. */
-			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
-			setup->wIndex | setup->wLength << 16,
+			setup->bRequestType | setup->bRequest << 8 | SWAP16(setup->wValue) << 16,
+			SWAP16(setup->wIndex) | SWAP16(setup->wLength) << 16,
 			TRB_LEN(8) | TRB_INTR_TARGET(0),
 			/* Immediate data in pointer */
 			TRB_IDT | TRB_TYPE(TRB_SETUP));
@@ -2007,6 +2026,15 @@
 		false);
 }
 
+/* -- 20140828 AVM/WK XHCI reset patch from newer kernel --*/
+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+{
+	return queue_command(xhci, 0, 0, 0,
+			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+			false);
+}
+
 /* Queue a configure endpoint command TRB */
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id, bool command_must_succeed)
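
Note on the SWAP16/SWAP32/SWAP64 helpers used throughout this patch: they are not mainline 2.6.32 symbols and are presumably defined elsewhere in the ar10 tree (most likely pulled in by the bare "+#include" near the top of the diff, whose header name was lost). A minimal sketch of what they would have to look like, assuming they simply wrap the kernel's standard little-endian accessors, which is the same transformation mainline applied later when the xHCI driver was made endian-safe with le32_to_cpu()/cpu_to_le32():

/*
 * Hypothetical definitions, not taken from this patch; the real macros live
 * elsewhere in the AVM tree.  Because a fixed-width byte swap is its own
 * inverse on a given host, one macro per width covers both directions:
 * reading LE TRB/context words into CPU order and writing CPU values back
 * as LE, which is why the patch applies SWAP32() on both sides of the
 * assignments above.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

#define SWAP16(x) ((u16)le16_to_cpu(x))
#define SWAP32(x) ((u32)le32_to_cpu(x))
#define SWAP64(x) ((u64)le64_to_cpu(x))

This also explains the asymmetry in the control-setup hunk: bRequestType and bRequest are single bytes of struct usb_ctrlrequest, while wValue, wIndex and wLength are __le16 fields, so only the 16-bit fields need SWAP16() before the assembled 32-bit words pass through queue_trb(), which applies SWAP32() on the way into the TRB.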
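
The backported TRB_RESET_DEV completion case finishes the command through handle_cmd_in_cmd_wait_list(), but the matching submitter is not part of this file. Below is a rough, hypothetical sketch of that caller, modeled on the later mainline xhci_discover_or_reset_device(); example_reset_device() is an illustrative name only, and the struct xhci_command field names (command_trb, cmd_list, completion, status) are the ones this kernel's wait-list code already uses. Mainline refinements such as a timeout on the wait and re-recording command_trb across a link TRB at a segment boundary are omitted.

/* Sketch only: assumed to sit in drivers/usb/host/xhci.c next to this file. */
#include "xhci.h"

static int example_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	struct xhci_command cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	INIT_LIST_HEAD(&cmd.cmd_list);
	cmd.completion = &done;

	spin_lock_irqsave(&xhci->lock, flags);
	/* Remember which TRB carries the command so that
	 * handle_cmd_in_cmd_wait_list() can match it on completion. */
	cmd.command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&cmd.cmd_list, &xhci->devs[slot_id]->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		list_del(&cmd.cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Completed from handle_cmd_completion()'s TRB_RESET_DEV case */
	wait_for_completion(&done);

	/* cmd.status now holds the xHC completion code (COMP_SUCCESS etc.) */
	return cmd.status;
}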