--- zzzz-none-000/linux-2.6.28.10/drivers/usb/musb/musb_gadget.c	2009-05-02 18:54:43.000000000 +0000
+++ puma5-6360-529/linux-2.6.28.10/drivers/usb/musb/musb_gadget.c	2010-03-30 18:41:51.000000000 +0000
@@ -260,12 +260,13 @@
 
 	musb_ep = req->ep;
 
+#if defined(CONFIG_USB_MUSB_HDRC_HCD) || defined(CONFIG_USB_MUSB_OTG)
 	/* we shouldn't get here while DMA is active ... but we do ... */
 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
-		DBG(4, "dma pending...\n");
+		DBG(4,"dma pending...\n");
 		return;
 	}
-
+#endif
 	/* read TXCSR before */
 	csr = musb_readw(epio, MUSB_TXCSR);
 
@@ -273,12 +274,13 @@
 	fifo_count = min(max_ep_writesize(musb, musb_ep),
 			(int)(request->length - request->actual));
 
+#if defined(CONFIG_USB_MUSB_HDRC_HCD) || defined(CONFIG_USB_MUSB_OTG)
 	if (csr & MUSB_TXCSR_TXPKTRDY) {
 		DBG(5, "%s old packet still ready , txcsr %03x\n",
 				musb_ep->end_point.name, csr);
 		return;
 	}
-
+#endif
 	if (csr & MUSB_TXCSR_P_SENDSTALL) {
 		DBG(5, "%s stalling, txcsr %03x\n",
 				musb_ep->end_point.name, csr);
@@ -339,6 +341,7 @@
 					| MUSB_TXCSR_P_UNDERRUN
 					| MUSB_TXCSR_TXPKTRDY);
 			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+			csr |= MUSB_TXCSR_DMAMODE;
 			musb_writew(epio, MUSB_TXCSR,
 				(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
 					| csr);
@@ -346,6 +349,8 @@
 			/* ensure writebuffer is empty */
 			csr = musb_readw(epio, MUSB_TXCSR);
 
+			musb_ep->dma->actual_len = 0L;
+
 			/* NOTE host side sets DMAENAB later than this; both are
 			 * OK since the transfer dma glue (between CPPI and Mentor
 			 * fifos) just tells CPPI it could start. Data only moves
@@ -408,6 +413,8 @@
 	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
 	void __iomem		*epio = musb->endpoints[epnum].regs;
 	struct dma_channel	*dma;
+	//unsigned int count,lcount;
+	//u16 wCsrVal;
 
 	musb_ep_select(mbase, epnum);
 	request = next_request(musb_ep);
@@ -443,7 +450,7 @@
 			musb_writew(epio, MUSB_TXCSR, csr);
 			DBG(20, "underrun on ep%d, req %p\n", epnum, request);
 		}
-
+#if 0
 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 			/* SHOULD NOT HAPPEN ... has with cppi though, after
 			 * changing SENDSTALL (and other cases); harmless?
@@ -451,22 +458,29 @@
 			 */
 			DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
 			break;
 		}
-
+#endif
 		if (request) {
 			u8	is_dma = 0;
 			if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
 				is_dma = 1;
+				/* the DMA is not disable here, since the residual data
+				   may be still available in cppi-fifo interface, since
+				   the cppi 4.1 dma generates tx completion after the transfer
+				   of last data bytes to cppi-fifo , it will make sure whether
+				   all bytes are transfered to endpoint fifo */
+#if 0
 				csr |= MUSB_TXCSR_P_WZC_BITS;
 				csr &= ~(MUSB_TXCSR_DMAENAB
 						| MUSB_TXCSR_P_UNDERRUN
 						| MUSB_TXCSR_TXPKTRDY);
 				musb_writew(epio, MUSB_TXCSR, csr);
+#endif
 				/* ensure writebuffer is empty */
 				csr = musb_readw(epio, MUSB_TXCSR);
 				request->actual += musb_ep->dma->actual_len;
 				DBG(4, "TXCSR%d %04x, dma off, "
-						"len %zu, req %p\n",
+						"len %Zd, req %p\n",
					epnum, csr,
					musb_ep->dma->actual_len,
					request);
@@ -493,6 +507,7 @@
 				/* on dma completion, fifo may not
 				 * be available yet ...
 				 */
+#if 0
 				if (csr & MUSB_TXCSR_TXPKTRDY)
 					break;
 
@@ -501,8 +516,23 @@
 						MUSB_TXCSR_MODE
 						| MUSB_TXCSR_TXPKTRDY);
 				request->zero = 0;
+#endif
 			}
-
+#if defined(CONFIG_USB_MUSB_HDRC_HCD) || defined(CONFIG_USB_MUSB_OTG)
+			count = 0x50000;
+			lcount = 0;
+			/* Ensure that FIFO is Empty here,since the last packet
+			   from cppi-fifo may be transfered by transfer dma is still
+			   availabe in fifo */
+			while (count--)
+			{
+				wCsrVal = musb_readw(epio,MUSB_TXCSR);
+				if ((wCsrVal & MUSB_TXCSR_FIFONOTEMPTY) == 0)
+					break;
+			}
+			if( count <= 0 )
+				DBG(5,"%s Warning: USB End point DMA disable before transfer complete\n",__FUNCTION__);
+#endif
 			/* ... or if not, then complete it */
 			musb_g_giveback(musb_ep, request, 0);
 
@@ -512,10 +542,12 @@
 			 * REVISIT for double buffering...
 			 * FIXME revisit for stalls too...
 			 */
+#if 0
 			musb_ep_select(mbase, epnum);
 			csr = musb_readw(epio, MUSB_TXCSR);
 			if (csr & MUSB_TXCSR_FIFONOTEMPTY)
 				break;
+#endif
 
 			request = musb_ep->desc
 				? next_request(musb_ep) : NULL;
@@ -525,8 +557,9 @@
 					break;
 				}
 			}
-
+#if defined(CONFIG_USB_MUSB_HDRC_HCD) || defined(CONFIG_USB_MUSB_OTG)
 			txstate(musb, to_musb_request(request));
+#endif
 		}
 
 	} while (0);
@@ -584,6 +617,8 @@
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
 
+		channel->actual_len = 0L;
+
 		/* NOTE: CPPI won't actually stop advancing the DMA
 		 * queue after short packet transfers, so this is almost
 		 * always going to run as IRQ-per-packet DMA so that
@@ -599,8 +634,10 @@
 			 * the cppi engine will be ready to take it as soon
 			 * as DMA is enabled
 			 */
+#if 0
 			csr &= ~(MUSB_RXCSR_AUTOCLEAR
 					| MUSB_RXCSR_DMAMODE);
+#endif
 			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
 			musb_writew(epio, MUSB_RXCSR, csr);
 			return;
@@ -749,12 +786,13 @@
 			csr, dma ? " (dma)" : "", request);
 
 	if (csr & MUSB_RXCSR_P_SENTSTALL) {
+#if 0
 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
 			(void) musb->dma_controller->channel_abort(dma);
 			request->actual += musb_ep->dma->actual_len;
 		}
-
+#endif
 		csr |= MUSB_RXCSR_P_WZC_BITS;
 		csr &= ~MUSB_RXCSR_P_SENTSTALL;
 		musb_writew(epio, MUSB_RXCSR, csr);
@@ -777,7 +815,7 @@
 		/* REVISIT not necessarily an error */
 		DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
 	}
-
+#if 0
 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
 		/* "should not happen"; likely RXPKTRDY pending for DMA */
 		DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
@@ -785,17 +823,17 @@
 			musb_ep->end_point.name, csr);
 		goto done;
 	}
-
+#endif
 	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
-		csr &= ~(MUSB_RXCSR_AUTOCLEAR
+/*		csr &= ~(MUSB_RXCSR_AUTOCLEAR
 				| MUSB_RXCSR_DMAENAB
-				| MUSB_RXCSR_DMAMODE);
+				| MUSB_RXCSR_DMAMODE); */
 		musb_writew(epio, MUSB_RXCSR,
 			MUSB_RXCSR_P_WZC_BITS | csr);
 
 		request->actual += musb_ep->dma->actual_len;
 
-		DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
+		DBG(4, "RXCSR%d %04x, dma off, %04x, len %Zd, req %p\n",
 			epnum, csr,
 			musb_readw(epio, MUSB_RXCSR),
 			musb_ep->dma->actual_len, request);
@@ -817,11 +855,11 @@
 			goto done;
 #endif
 		musb_g_giveback(musb_ep, request, 0);
-
+#if 0
 		request = next_request(musb_ep);
 		if (!request)
 			goto done;
-
+#endif
 		/* don't start more i/o till the stall clears */
 		musb_ep_select(mbase, epnum);
 		csr = musb_readw(epio, MUSB_RXCSR);
@@ -829,7 +867,7 @@
 			goto done;
 	}
-
+#if 0
 	/* analyze request if the ep is hot */
 	if (request)
 		rxstate(musb, to_musb_request(request));
@@ -837,7 +875,7 @@
 		DBG(3, "packet waiting for %s%s request\n", musb_ep->desc ?
 			"" : "inactive ", musb_ep->end_point.name);
-
+#endif
 done:
 	return;
 }
@@ -1065,6 +1103,23 @@
 	kfree(to_musb_request(req));
 }
 
+/*
+ * dma-coherent memory allocation (for dma-capable endpoints)
+ *
+ * NOTE: the dma_*_coherent() API calls suck; most implementations are
+ * (a) page-oriented, so small buffers lose big, and (b) asymmetric with
+ * respect to calls with irqs disabled: alloc is safe, free is not.
+ */
+static void *musb_gadget_alloc_buffer(struct usb_ep *ep, unsigned bytes,
+		dma_addr_t * dma, gfp_t gfp_flags)
+{
+	struct musb_ep *musb_ep = to_musb_ep(ep);
+
+	return dma_alloc_coherent(musb_ep->musb->controller,
+			bytes, dma, gfp_flags);
+}
+
+static DEFINE_SPINLOCK(buflock);
 static LIST_HEAD(buffers);
 
 struct free_record {
@@ -1074,6 +1129,42 @@
 	dma_addr_t		dma;
 };
 
+static void do_free(unsigned long ignored)
+{
+	spin_lock_irq(&buflock);
+	while (!list_empty(&buffers)) {
+		struct free_record	*buf;
+
+		buf = list_entry(buffers.next, struct free_record, list);
+		list_del(&buf->list);
+		spin_unlock_irq(&buflock);
+
+		dma_free_coherent(buf->dev, buf->bytes, buf, buf->dma);
+
+		spin_lock_irq(&buflock);
+	}
+	spin_unlock_irq(&buflock);
+}
+
+static DECLARE_TASKLET(deferred_free, do_free, 0);
+
+static void musb_gadget_free_buffer(struct usb_ep *ep,
+		void *address, dma_addr_t dma, unsigned bytes)
+{
+	struct musb_ep *musb_ep = to_musb_ep(ep);
+	struct free_record *buf = address;
+	unsigned long flags;
+
+	buf->dev = musb_ep->musb->controller;
+	buf->bytes = bytes;
+	buf->dma = dma;
+
+	spin_lock_irqsave(&buflock, flags);
+	list_add_tail(&buf->list, &buffers);
+	tasklet_schedule(&deferred_free);
+	spin_unlock_irqrestore(&buflock, flags);
+}
+
 /*
  * Context: controller locked, IRQs blocked.
  */
@@ -1157,11 +1248,13 @@
 	/* add request to the list */
 	list_add_tail(&(request->request.list), &(musb_ep->req_list));
-
+#if defined(CONFIG_USB_MUSB_HDRC_HCD) || defined(CONFIG_USB_MUSB_OTG)
 	/* it this is the head of the queue, start i/o ...
 	 */
 	if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
 		musb_ep_restart(musb, request);
-
+#else
+	musb_ep_restart(musb, request);
+#endif
 cleanup:
 	spin_unlock_irqrestore(&musb->lock, lockflags);
 	return status;
@@ -1367,6 +1460,8 @@
 	.disable	= musb_gadget_disable,
 	.alloc_request	= musb_alloc_request,
 	.free_request	= musb_free_request,
+	.alloc_buffer	= musb_gadget_alloc_buffer,
+	.free_buffer	= musb_gadget_free_buffer,
 	.queue		= musb_gadget_queue,
 	.dequeue	= musb_gadget_dequeue,
 	.set_halt	= musb_gadget_set_halt,
@@ -1714,10 +1809,10 @@
 		retval = driver->bind(&musb->g);
 		if (retval != 0) {
 			DBG(3, "bind to driver %s failed --> %d\n",
-					driver->driver.name, retval);
+				driver->driver.name, retval);
 			musb->gadget_driver = NULL;
 			musb->g.dev.driver = NULL;
-		}
+		 }
 
 		spin_lock_irqsave(&musb->lock, flags);
 
@@ -1767,6 +1862,7 @@
 {
 	int			i;
 	struct musb_hw_ep	*hw_ep;
+	struct usb_ep		*ep;
 
 	/* don't disconnect if it's not connected */
 	if (musb->g.speed == USB_SPEED_UNKNOWN)
@@ -1790,12 +1886,26 @@
 				i++, hw_ep++) {
 			musb_ep_select(musb->mregs, i);
 			if (hw_ep->is_shared_fifo /* || !epnum */) {
-				nuke(&hw_ep->ep_in, -ESHUTDOWN);
+				ep = &hw_ep->ep_in.end_point;
+
+				spin_unlock(&musb->lock);
+				musb_gadget_disable(ep);
+				spin_lock(&musb->lock);
 			} else {
-				if (hw_ep->max_packet_sz_tx)
-					nuke(&hw_ep->ep_in, -ESHUTDOWN);
-				if (hw_ep->max_packet_sz_rx)
-					nuke(&hw_ep->ep_out, -ESHUTDOWN);
+				if (hw_ep->max_packet_sz_tx) {
+					ep = &hw_ep->ep_in.end_point;
+
+					spin_unlock(&musb->lock);
+					musb_gadget_disable(ep);
+					spin_lock(&musb->lock);
+				}
+				if (hw_ep->max_packet_sz_rx) {
+					ep = &hw_ep->ep_out.end_point;
+
+					spin_unlock(&musb->lock);
+					musb_gadget_disable(ep);
+					spin_lock(&musb->lock);
+				}
 			}
 		}
 
@@ -1884,7 +1994,7 @@
 		}
 		break;
 	default:
-		WARNING("unhandled RESUME transition (%s)\n",
+		WARN("unhandled RESUME transition (%s)\n",
 				otg_state_string(musb));
 	}
 }
@@ -1914,7 +2024,7 @@
 		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
 		 * A_PERIPHERAL may need care too
 		 */
-		WARNING("unhandled SUSPEND transition (%s)\n",
+		WARN("unhandled SUSPEND transition (%s)\n",
 				otg_state_string(musb));
 	}
 }