/*****************************************************************************
 ** FILE NAME    : dwc_otg_cil.c
 ** PROJECT      : USB Host and Device driver
 ** MODULES      : USB Host and Device driver
 ** SRC VERSION  : 2.0
 ** DATE         : 1/March/2008
 ** AUTHOR       : Chen, Howard based on Synopsys Original
 ** DESCRIPTION  : The Core Interface Layer provides basic services for accessing and
 **                managing the DWC_otg hardware. These services are used by both the
 **                Host Controller Driver and the Peripheral Controller Driver.
 **
 **                The CIL manages the memory map for the core so that the HCD and PCD
 **                don't have to do this separately. It also handles basic tasks like
 **                reading/writing the registers and data FIFOs in the controller.
 **                Some of the data access functions provide encapsulation of several
 **                operations required to perform a task, such as writing multiple
 **                registers to start a transfer. Finally, the CIL performs basic
 **                services that are not specific to either the host or device modes
 **                of operation. These services include management of the OTG Host
 **                Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
 **                Diagnostic API is also provided to allow testing of the controller
 **                hardware.
 ** FUNCTIONS    :
 ** COMPILER     : gcc
 ** REFERENCE    :
 ** COPYRIGHT    :
 **  Version Control Section  **
 **   $Author$
 **   $Date$
 **   $Revisions$
 **   $Log$        Revision history
 *****************************************************************************/

/*! \file dwc_otg_cil.c
 *  \brief This file contains the interface to the Core Interface Layer.
 */

#include
#include "ifxusb_version.h"

//Make options
//#define USE_ORIG

//Compiling options
//#define PARTIAL_POWER_DOWN

#define REBOOT_DELAY 100

#include
#include
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
	#ifdef DEBUG
		#include
	#endif
#endif

#include "dwc_otg_ifx.h"

#include "dwc_otg_plat.h"
#include "dwc_otg_regs.h"
#include "dwc_otg_cil.h"
#include "dwc_otg_driver.h"
#ifdef DWC_IS_DEVICE
	#include "dwc_otg_pcd.h"
#endif

#include
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
	#include
#else
	#include
#endif

/*
 * Allocate a word-aligned DMA buffer and return an uncached (KSEG1) view of
 * it. Three bookkeeping words (total size, page order and the cached address
 * returned by __get_free_pages) are stored in front of the returned pointer
 * so that usb_free_buf() can release the pages later.
 */
void *usb_alloc_buf(size_t size, int clear)
{
	uint32_t *cached,*uncached;
	uint32_t totalsize,page;

	if(!size)
		return 0;

	size=(size+3)&0xFFFFFFFC;	/* round up to a whole number of words */
	totalsize=size + 12;		/* room for the three bookkeeping words */
	page=get_order(totalsize);

	cached = (void *) __get_free_pages(GFP_ATOMIC | GFP_KERNEL | GFP_DMA, page);
	if(!cached)
	{
		printk(KERN_INFO "%s Allocation Failed size:%d\n",__func__,size);
		return NULL;
	}
	uncached = (uint32_t *)(KSEG1ADDR(cached));
	if(clear)
		memset(uncached, 0, totalsize);

	*(uncached+0)=totalsize;
	*(uncached+1)=page;
	*(uncached+2)=(uint32_t)cached;
	return (void *)(uncached+3);
}

/*
 * Free a buffer obtained from usb_alloc_buf(). The bookkeeping words written
 * at allocation time are sanity-checked before the pages are released.
 */
void usb_free_buf(void *vaddr)
{
	uint32_t totalsize,page;
	uint32_t *cached,*uncached;

	if(vaddr != NULL)
	{
		uncached=vaddr;
		uncached-=3;
		totalsize=*(uncached+0);
		page=*(uncached+1);
		cached=(uint32_t *)(*(uncached+2));

		if(totalsize
		   && page==get_order(totalsize)
		   && cached==(uint32_t *)(KSEG0ADDR(uncached)))
		{
			free_pages((unsigned long)cached, page);
			return;
		}
		printk(KERN_INFO "%s invalid info %p %p 0x%08x 0x%08x\n",
		       __func__,vaddr,cached,totalsize,page);
		return;
	}
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	#include
	#include
	#include
	#include
	#include
#endif

/*!
 \fn dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr, const uint32_t *_fifo_base_addr, const uint32_t *_fifo_dbg_addr, dwc_otg_core_params_t *_core_params)
 \brief This function is called to initialize the DWC_otg CSR data structures.
 The register addresses in the device and host structures are initialized from
 the base address supplied by the caller.
 The calling function must make the OS calls to get the base address of the
 DWC_otg controller registers. The core_params argument holds the parameters
 that specify how the core should be configured.

 \param[in] _reg_base_addr Base address of DWC_otg core registers
 \param[in] _fifo_base_addr Fifo base address
 \param[in] _fifo_dbg_addr Fifo debug address
 \param[in] _core_params Pointer to the core configuration parameters

 \return 0: Fail; pointer of core controller IF
 */
dwc_otg_core_if_t *dwc_otg_cil_init(uint32_t *_reg_base_addr,
                                    uint32_t *_fifo_base_addr,
                                    uint32_t *_fifo_dbg_addr,
                                    dwc_otg_core_params_t *_core_params)
{
	dwc_otg_core_if_t *core_if = 0;
#ifdef DWC_IS_DEVICE
	dwc_otg_dev_if_t *dev_if = 0;
#endif
#ifdef DWC_IS_HOST
	dwc_otg_host_if_t *host_if = 0;
#endif
	uint8_t *reg_base = (uint8_t *)_reg_base_addr;
	int i = 0;

	DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, _reg_base_addr, _core_params);

	core_if = kmalloc( sizeof(dwc_otg_core_if_t), GFP_KERNEL);
	if (core_if == 0)
	{
		DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n");
		return 0;
	}
	memset(core_if, 0, sizeof(dwc_otg_core_if_t));

	core_if->core_params = _core_params;
	core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base;

	/*
	 * Allocate the Device Mode structures.
	 */
#ifdef DWC_IS_DEVICE
	dev_if = kmalloc( sizeof(dwc_otg_dev_if_t), GFP_KERNEL);
	if (dev_if == 0)
	{
		DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n");
		kfree( core_if );
		return 0;
	}
	dev_if->dev_global_regs =
	    (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET);

	/* Loop bound assumed: the in/out EP register arrays are sized MAX_EPS_CHANNELS. */
	for (i=0; i<MAX_EPS_CHANNELS; i++)
	{
		dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
		    (reg_base + DWC_DEV_IN_EP_REG_OFFSET + (i * DWC_EP_REG_OFFSET));
		dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
		    (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + (i * DWC_EP_REG_OFFSET));
		DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n",
		            i, &dev_if->in_ep_regs[i]->diepctl);
		DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n",
		            i, &dev_if->out_ep_regs[i]->doepctl);
	}
	dev_if->speed = 0;	// unknown
	dev_if->num_eps = MAX_EPS_CHANNELS;
	dev_if->num_perio_eps = 0;
	core_if->dev_if = dev_if;
#endif //DWC_IS_DEVICE

	/*
	 * Allocate the Host Mode structures.
	 */
#ifdef DWC_IS_HOST
	host_if = kmalloc( sizeof(dwc_otg_host_if_t), GFP_KERNEL);
	if (host_if == 0)
	{
		DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n");
		// kfree( dev_if );
		kfree( core_if );
		return 0;
	}
	host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
	    (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
	host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);

	/* Loop bound assumed: the host channel register array is sized MAX_EPS_CHANNELS. */
	for (i=0; i<MAX_EPS_CHANNELS; i++)
	{
		host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
		    (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + (i * DWC_OTG_CHAN_REGS_OFFSET));
		DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
		            i, &host_if->hc_regs[i]->hcchar);
	}
	host_if->num_host_channels = MAX_EPS_CHANNELS;
	core_if->host_if = host_if;
#endif //DWC_IS_HOST

	/*
	 * Allocate the FIFO Address.
	 */
	/* Loop bound assumed: one data FIFO window per channel/endpoint, MAX_EPS_CHANNELS in total. */
	for (i=0; i<MAX_EPS_CHANNELS; i++)
	{
		core_if->data_fifo[i] =
		    (uint32_t *)((uint32_t)_fifo_base_addr + (i * DWC_OTG_DATA_FIFO_SIZE));
		DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n",
		            i, (unsigned)core_if->data_fifo[i]);
	}
	core_if->data_fifo_dbg = (uint32_t *)_fifo_dbg_addr;
	core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET);

	/*
	 * Store the contents of the hardware configuration registers here for
	 * easy access later.
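	 * The GHWCFG1-4 values are fixed when the core is synthesized, so a
	 * single read of each at init time is sufficient. For example,
	 * hwcfg2.b.num_dev_ep and hwcfg3.b.dfifo_depth (used again in
	 * dwc_otg_core_init() below) report how many device endpoints and how
	 * much FIFO RAM this particular core instance actually implements.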
*/ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1); core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2); core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3); core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4); DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32); DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32); DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32); DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32); DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode); DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture); DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep); DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan); DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth); DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth); DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth); DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth); DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width); return core_if; } /* * This function frees the structures allocated by dwc_otg_cil_init(). * * @param[in] _core_if The core interface pointer returned from * dwc_otg_cil_init(). * */ void dwc_otg_cil_remove( dwc_otg_core_if_t *_core_if ) { /* Disable all interrupts */ dwc_modify_reg32( &_core_if->core_global_regs->gahbcfg, 1, 0); dwc_write_reg32( &_core_if->core_global_regs->gintmsk, 0); #ifdef DWC_IS_DEVICE if ( _core_if->dev_if ) kfree( _core_if->dev_if ); #endif #ifdef DWC_IS_HOST if ( _core_if->host_if ) kfree( _core_if->host_if ); #endif kfree( _core_if ); } /* * This function enables the controller's Global Interrupt in the AHB Config * register. * * @param[in] _core_if Programming view of DWC_otg controller. */ extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if ) { gahbcfg_data_t ahbcfg ={ .d32 = 0}; ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32); } /* * This function disables the controller's Global Interrupt in the AHB Config * register. * * @param[in] _core_if Programming view of DWC_otg controller. */ extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if ) { gahbcfg_data_t ahbcfg ={ .d32 = 0}; ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0); } /* * This function initializes the commmon interrupts, used in both * device and host modes. * * @param[in] _core_if Programming view of the DWC_otg controller * */ static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *_core_if) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; gintmsk_data_t intr_mask ={ .d32 = 0}; /* Clear any pending OTG Interrupts */ dwc_write_reg32( &global_regs->gotgint, 0xFFFFFFFF); /* Clear any pending interrupts */ dwc_write_reg32( &global_regs->gintsts, 0xFFFFFFFF); /* Enable the interrupts in the GINTMSK.*/ intr_mask.b.modemismatch = 1; #ifndef USE_INTERNAL_DMA intr_mask.b.rxstsqlvl = 1; #endif intr_mask.b.conidstschng = 1; intr_mask.b.wkupintr = 1; intr_mask.b.disconnect = 1; intr_mask.b.usbsuspend = 1; dwc_write_reg32( &global_regs->gintmsk, intr_mask.d32); } /* * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY type. 
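 *
 * The function below programs DWC_HCFG_30_60_MHZ unconditionally, which
 * matches a UTMI+ high-speed PHY whose clock runs at 30 or 60 MHz; a design
 * using a dedicated full-speed PHY would typically select the 48 MHz setting
 * here instead.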
*/ #ifdef DWC_IS_HOST static void init_fslspclksel(dwc_otg_core_if_t *_core_if) { #ifdef DWC_IS_HOST uint32_t val; hcfg_data_t hcfg; val = DWC_HCFG_30_60_MHZ; DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val); hcfg.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hcfg); hcfg.b.fslspclksel = val; dwc_write_reg32(&_core_if->host_if->host_global_regs->hcfg, hcfg.d32); #endif } #endif /* * Initializes the DevSpd field of the DCFG register depending on the PHY type * and the enumeration speed of the device. */ void init_devspd(dwc_otg_core_if_t *_core_if) { #ifdef DWC_IS_DEVICE uint32_t val; dcfg_data_t dcfg; #ifdef FORCE_USB11 val = 0x1; #else if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) /* High speed PHY running at full speed */ val = 0x1; else /* High speed PHY running at high speed */ val = 0x0; #endif DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val); dcfg.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dcfg); dcfg.b.devspd = val; dwc_write_reg32(&_core_if->dev_if->dev_global_regs->dcfg, dcfg.d32); #endif } /* * This function initializes the DWC_otg controller registers and * prepares the core for device mode or host mode operation. * * @param _core_if Programming view of the DWC_otg controller * */ void dwc_otg_core_init(dwc_otg_core_if_t *_core_if) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; #ifdef DWC_IS_DEVICE dwc_otg_dev_if_t *dev_if = _core_if->dev_if; int i = 0; #endif gahbcfg_data_t ahbcfg ={ .d32 = 0}; gusbcfg_data_t usbcfg ={ .d32 = 0}; // gi2cctl_data_t i2cctl ={ .d32 = 0}; DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n",_core_if); /* Common Initialization */ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); usbcfg.b.ulpi_ext_vbus_drv = 1; usbcfg.b.term_sel_dl_pulse = 0; dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); /* Reset the Controller */ do { while(dwc_otg_core_reset( _core_if )) dwc_otg_hard_reset(); #ifdef DWC_IS_DEVICE } while (dwc_otg_is_host_mode(_core_if)); #endif #ifdef DWC_IS_HOST } while (dwc_otg_is_device_mode(_core_if)); #endif /* Initialize parameters from Hardware configuration registers. */ #ifdef DWC_IS_DEVICE dev_if->num_eps = _core_if->hwcfg2.b.num_dev_ep; dev_if->num_perio_eps = _core_if->hwcfg4.b.num_dev_perio_in_ep; DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",_core_if->hwcfg4.b.num_dev_perio_in_ep); for (i=0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) { dev_if->perio_tx_fifo_size[i] = dwc_read_reg32( &global_regs->dptxfsiz[i]) >> 16; DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", i, dev_if->perio_tx_fifo_size[i]); } #endif _core_if->total_fifo_size = _core_if->hwcfg3.b.dfifo_depth; _core_if->rx_fifo_size = dwc_read_reg32( &global_regs->grxfsiz); _core_if->nperio_tx_fifo_size = dwc_read_reg32( &global_regs->gnptxfsiz) >> 16; DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", _core_if->total_fifo_size); DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", _core_if->rx_fifo_size); DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", _core_if->nperio_tx_fifo_size); /* This programming sequence needs to happen in FS mode before any other * programming occurs */ /* High speed PHY. */ if (!_core_if->phy_init_done) { _core_if->phy_init_done = 1; /* HS PHY parameters. These parameters are preserved * during soft reset so only program the first time. Do * a soft reset immediately after setting phyif. 
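 *
 * GUSBCFG.PHYIf selects the UTMI+ data bus width: 1 means a 16-bit
 * interface (PHY clock at 30 MHz), 0 means an 8-bit interface (60 MHz).
 * The phy_utmi_width core parameter is simply mapped onto this bit below,
 * e.g. phy_utmi_width == 16 results in phyif = 1.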
*/ usbcfg.b.ulpi_utmi_sel = 0; //UTMI+ usbcfg.b.phyif = (_core_if->core_params->phy_utmi_width == 16)?1:0; dwc_write_reg32( &global_regs->gusbcfg, usbcfg.d32); /* Reset after setting the PHY parameters */ dwc_otg_core_reset( _core_if ); } usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); usbcfg.b.ulpi_fsls = 0; usbcfg.b.ulpi_clk_sus_m = 0; dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); /* Program the GAHBCFG Register.*/ #ifdef USE_INTERNAL_DMA switch (_core_if->core_params->dma_burst_size) { case 0 : ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_SINGLE; break; case 1 : ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR; break; case 4 : ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR4; break; case 8 : ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR8; break; case 16: ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR16; break; } ahbcfg.b.dmaenable = 1; #else ahbcfg.b.nptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ahbcfg.b.dmaenable = 0; #endif dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32); /* * Program the GUSBCFG register. */ usbcfg.d32 = dwc_read_reg32( &global_regs->gusbcfg ); usbcfg.b.hnpcap = 0; usbcfg.b.srpcap = 0; dwc_write_reg32( &global_regs->gusbcfg, usbcfg.d32); /* Enable common interrupts */ dwc_otg_enable_common_interrupts( _core_if ); /* Do device or host intialization based on mode during PCD and HCD initialization */ #ifdef DWC_IS_HOST DWC_DEBUGPL(DBG_ANY, "Host Mode\n" ); #endif #ifdef DWC_IS_DEVICE DWC_DEBUGPL(DBG_ANY, "Device Mode\n" ); dwc_otg_core_dev_init( _core_if ); #endif } #ifdef DWC_IS_DEVICE /* * This function enables the Device mode interrupts. * * @param _core_if Programming view of DWC_otg controller */ void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if) { gintmsk_data_t intr_mask ={ .d32 = 0}; dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); /* Disable all interrupts. */ dwc_write_reg32( &global_regs->gintmsk, 0); /* Clear any pending interrupts */ dwc_write_reg32( &global_regs->gintsts, 0xFFFFFFFF); /* Enable the common interrupts */ dwc_otg_enable_common_interrupts( _core_if ); /* Enable interrupts */ intr_mask.b.usbreset = 1; intr_mask.b.enumdone = 1; intr_mask.b.epmismatch = 1; intr_mask.b.inepintr = 1; intr_mask.b.outepintr = 1; // intr_mask.b.erlysuspend = 1; dwc_modify_reg32( &global_regs->gintmsk, intr_mask.d32, intr_mask.d32); DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, dwc_read_reg32( &global_regs->gintmsk)); } /* * This function initializes the DWC_otg controller registers for * device mode. 
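 *
 * The data FIFO RAM is laid out linearly by this function: the Rx FIFO
 * starts at offset 0, the non-periodic Tx FIFO starts immediately after it,
 * and (when USE_PERIODIC_EP is defined) the periodic Tx FIFOs follow. As an
 * illustration only, with dev_rx_fifo_size = 1024 and
 * dev_nperio_tx_fifo_size = 1024 words, GRXFSIZ would be programmed to 1024
 * and GNPTXFSIZ with a depth of 1024 starting at address 1024.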
* * @param _core_if Programming view of DWC_otg controller * */ void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; dwc_otg_dev_if_t *dev_if = _core_if->dev_if; dwc_otg_core_params_t *params = _core_if->core_params; dcfg_data_t dcfg ={.d32 = 0}; grstctl_t resetctl ={ .d32=0 }; int i; uint32_t rx_fifo_size; fifosize_data_t nptxfifosize; #ifdef USE_PERIODIC_EP fifosize_data_t ptxfifosize; #endif /* Restart the Phy Clock */ dwc_write_reg32(_core_if->pcgcctl, 0); /* Device configuration register */ init_devspd(_core_if); dcfg.d32 = dwc_read_reg32( &dev_if->dev_global_regs->dcfg); dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80; dwc_write_reg32( &dev_if->dev_global_regs->dcfg, dcfg.d32 ); /* Configure data FIFO sizes */ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", _core_if->total_fifo_size); DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size); DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size); /* Rx FIFO */ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); rx_fifo_size = params->dev_rx_fifo_size; dwc_write_reg32( &global_regs->grxfsiz, rx_fifo_size ); DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); /* Non-periodic Tx FIFO */ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; nptxfifosize.b.startaddr = params->dev_rx_fifo_size; dwc_write_reg32( &global_regs->gnptxfsiz, nptxfifosize.d32 ); DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); #ifdef USE_PERIODIC_EP /* * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15. * Indexes of the FIFO size module parameters in the * dev_perio_tx_fifo_size array and the FIFO size registers in * the dptxfsiz array run from 0 to 14. */ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; for (i=0; i < dev_if->num_perio_eps; i++) { DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz[%d]=%08x\n", i, dwc_read_reg32(&global_regs->dptxfsiz[i])); dwc_write_reg32( &global_regs->dptxfsiz[i], ptxfifosize.d32 ); DWC_DEBUGPL(DBG_CIL, "new dptxfsiz[%d]=%08x\n", i, dwc_read_reg32(&global_regs->dptxfsiz[i])); ptxfifosize.b.startaddr += params->dev_perio_tx_fifo_size[i]; } #endif /* Flush the FIFOs */ dwc_otg_flush_tx_fifo(_core_if, 0x10); /* all Tx FIFOs */ dwc_otg_flush_rx_fifo(_core_if); /* Flush the Learning Queue. 
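 * The IN token sequence learning queue records the order in which the host
 * issued IN tokens. Flushing it (GRSTCTL.INTknQFlsh, written just below)
 * discards any stale token history from before this re-initialization;
 * that is desirable here because the driver later serializes shared-FIFO
 * IN endpoints through the DIEPCTL.NextEp chain (see dwc_otg_jump_ep()).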
*/ resetctl.b.intknqflsh = 1; dwc_write_reg32( &_core_if->core_global_regs->grstctl, resetctl.d32); /* Clear all pending Device Interrupts */ dwc_write_reg32( &dev_if->dev_global_regs->diepmsk, 0 ); dwc_write_reg32( &dev_if->dev_global_regs->doepmsk, 0 ); dwc_write_reg32( &dev_if->dev_global_regs->daint, 0xFFFFFFFF ); dwc_write_reg32( &dev_if->dev_global_regs->daintmsk, 0 ); for (i=0; i < dev_if->num_eps; i++) { depctl_data_t depctl; depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); if (depctl.b.epena) { depctl.d32 = 0; depctl.b.epdis = 1; depctl.b.snak = 1; } else depctl.d32 = 0; dwc_write_reg32( &dev_if->in_ep_regs[i]->diepctl, depctl.d32); depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); if (depctl.b.epena) { depctl.d32 = 0; depctl.b.epdis = 1; depctl.b.snak = 1; } else depctl.d32 = 0; dwc_write_reg32( &dev_if->out_ep_regs[i]->doepctl, depctl.d32); dwc_write_reg32( &dev_if->in_ep_regs[i]->dieptsiz, 0); dwc_write_reg32( &dev_if->out_ep_regs[i]->doeptsiz, 0); dwc_write_reg32( &dev_if->in_ep_regs[i]->diepdma, 0); dwc_write_reg32( &dev_if->out_ep_regs[i]->doepdma, 0); dwc_write_reg32( &dev_if->in_ep_regs[i]->diepint, 0xFF); dwc_write_reg32( &dev_if->out_ep_regs[i]->doepint, 0xFF); } dwc_otg_enable_device_interrupts( _core_if ); } #endif // DWC_IS_DEVICE /* * This function enables the Host mode interrupts. * * @param _core_if Programming view of DWC_otg controller */ #ifdef DWC_IS_HOST void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; gintmsk_data_t intr_mask ={.d32 = 0}; DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); /* Disable all interrupts. */ dwc_write_reg32(&global_regs->gintmsk, 0); /* Clear any pending interrupts. */ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); /* Enable the common interrupts */ dwc_otg_enable_common_interrupts(_core_if); /* * Enable host mode interrupts without disturbing common * interrupts. */ #if defined(__NONE_SOF_INTR__) intr_mask.b.sofintr = 0; #else intr_mask.b.sofintr = 1; #endif intr_mask.b.portintr = 1; intr_mask.b.hcintr = 1; dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); } /* * This function disables the Host Mode interrupts. * * @param _core_if Programming view of DWC_otg controller */ void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; gintmsk_data_t intr_mask ={.d32 = 0}; DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__); /* * Disable host mode interrupts without disturbing common * interrupts. */ intr_mask.b.sofintr = 1; intr_mask.b.portintr = 1; intr_mask.b.hcintr = 1; intr_mask.b.ptxfempty = 1; intr_mask.b.nptxfempty = 1; dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); } /* * This function initializes the DWC_otg controller registers for * host mode. * * This function flushes the Tx and Rx FIFOs and it flushes any entries in the * request queues. Host channels are reset to ensure that they are ready for * performing transfers. 
* * @param _core_if Programming view of DWC_otg controller * */ void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; dwc_otg_host_if_t *host_if = _core_if->host_if; dwc_otg_core_params_t *params = _core_if->core_params; hprt0_data_t hprt0 ={.d32 = 0}; fifosize_data_t nptxfifosize; fifosize_data_t ptxfifosize; int i; hcchar_data_t hcchar; hcfg_data_t hcfg; dwc_otg_hc_regs_t *hc_regs; int num_channels; gotgctl_data_t gotgctl ={.d32 = 0}; DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, _core_if); /* Restart the Phy Clock */ dwc_write_reg32(_core_if->pcgcctl, 0); /* Initialize Host Configuration Register */ init_fslspclksel(_core_if); if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) { hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); hcfg.b.fslssupp = 1; dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32); } /* Configure data FIFO sizes */ DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", _core_if->total_fifo_size); DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size); DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size); DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size); /* Rx FIFO */ DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size); DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); /* Non-periodic Tx FIFO */ DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); nptxfifosize.b.depth = params->host_nperio_tx_fifo_size; nptxfifosize.b.startaddr = params->host_rx_fifo_size; dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); /* Periodic Tx FIFO */ DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ptxfifosize.b.depth = params->host_perio_tx_fifo_size; ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32); DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); /* Clear Host Set HNP Enable in the OTG Control Register */ gotgctl.b.hstsethnpen = 1; dwc_modify_reg32( &global_regs->gotgctl, gotgctl.d32, 0); /* Make sure the FIFOs are flushed. */ dwc_otg_flush_tx_fifo(_core_if, 0x10 /* all Tx FIFOs */); dwc_otg_flush_rx_fifo(_core_if); /* Flush out any leftover queued requests. */ num_channels = _core_if->core_params->host_channels; for (i = 0; i < num_channels; i++) { hc_regs = _core_if->host_if->hc_regs[i]; hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.b.chen = 0; hcchar.b.chdis = 1; hcchar.b.epdir = 0; dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); } /* Halt all channels to put them into a known state. */ for (i = 0; i < num_channels; i++) { int count = 0; hc_regs = _core_if->host_if->hc_regs[i]; hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.b.chen = 1; hcchar.b.chdis = 1; hcchar.b.epdir = 0; dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i); do{ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); if (++count > 1000) { DWC_ERROR("%s: Unable to clear halt on channel %d\n", __func__, i); break; } } while (hcchar.b.chen); } /* Turn on the vbus power. 
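 * Note that several HPRT0 bits (port enable and the connect/enable/
 * overcurrent change flags) are write-1-to-clear. The current value is
 * therefore fetched through dwc_otg_read_hprt0(), which is expected to mask
 * those bits, so that writing the value back with prtpwr set does not
 * accidentally acknowledge a pending port event.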
*/ { hprt0.d32 = dwc_otg_read_hprt0(_core_if); DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr); if (hprt0.b.prtpwr == 0 ) { hprt0.b.prtpwr = 1; dwc_write_reg32(host_if->hprt0, hprt0.d32); dwc_otg_vbus_on(); } } dwc_otg_enable_host_interrupts( _core_if ); } /* * Prepares a host channel for transferring packets to/from a specific * endpoint. The HCCHARn register is set up with the characteristics specified * in _hc. Host channel interrupts that may need to be serviced while this * transfer is in progress are enabled. * * @param _core_if Programming view of DWC_otg controller * @param _hc Information needed to initialize the host channel */ void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) { uint32_t intr_enable; hcintmsk_data_t hc_intr_mask; gintmsk_data_t gintmsk ={.d32 = 0}; hcchar_data_t hcchar; hcsplt_data_t hcsplt; uint8_t hc_num = _hc->hc_num; dwc_otg_host_if_t *host_if = _core_if->host_if; dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num]; /* Clear old interrupt conditions for this host channel. */ hc_intr_mask.d32 = 0xFFFFFFFF; hc_intr_mask.b.reserved = 0; dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32); /* Enable channel interrupts required for this transfer. */ hc_intr_mask.d32 = 0; hc_intr_mask.b.chhltd = 1; #ifdef USE_INTERNAL_DMA hc_intr_mask.b.ahberr = 1; if (_hc->error_state && !_hc->do_split && _hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { hc_intr_mask.b.ack = 1; if (_hc->ep_is_in) { hc_intr_mask.b.datatglerr = 1; if (_hc->ep_type != DWC_OTG_EP_TYPE_INTR) hc_intr_mask.b.nak = 1; } } #else switch (_hc->ep_type) { case DWC_OTG_EP_TYPE_CONTROL: case DWC_OTG_EP_TYPE_BULK: hc_intr_mask.b.xfercompl = 1; hc_intr_mask.b.stall = 1; hc_intr_mask.b.xacterr = 1; hc_intr_mask.b.datatglerr = 1; if (_hc->ep_is_in) hc_intr_mask.b.bblerr = 1; else { hc_intr_mask.b.nak = 1; hc_intr_mask.b.nyet = 1; if (_hc->do_ping) hc_intr_mask.b.ack = 1; } if (_hc->do_split) { hc_intr_mask.b.nak = 1; if (_hc->complete_split) hc_intr_mask.b.nyet = 1; else hc_intr_mask.b.ack = 1; } if (_hc->error_state) hc_intr_mask.b.ack = 1; break; case DWC_OTG_EP_TYPE_INTR: hc_intr_mask.b.xfercompl = 1; hc_intr_mask.b.nak = 1; hc_intr_mask.b.stall = 1; hc_intr_mask.b.xacterr = 1; hc_intr_mask.b.datatglerr = 1; hc_intr_mask.b.frmovrun = 1; if (_hc->ep_is_in) hc_intr_mask.b.bblerr = 1; if (_hc->error_state) hc_intr_mask.b.ack = 1; if (_hc->do_split) { if (_hc->complete_split) hc_intr_mask.b.nyet = 1; else hc_intr_mask.b.ack = 1; } break; case DWC_OTG_EP_TYPE_ISOC: hc_intr_mask.b.xfercompl = 1; hc_intr_mask.b.frmovrun = 1; hc_intr_mask.b.ack = 1; if (_hc->ep_is_in) { hc_intr_mask.b.xacterr = 1; hc_intr_mask.b.bblerr = 1; } break; } #endif dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32); /* Enable the top level host channel interrupt. */ intr_enable = (1 << hc_num); dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable); /* Make sure host channel interrupts are enabled. */ gintmsk.b.hcintr = 1; dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, 0, gintmsk.d32); /* * Program the HCCHARn register with the endpoint characteristics for * the current transfer. 
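 *
 * As a purely illustrative example, a bulk IN transfer to device address 3,
 * endpoint 2, with a 512-byte maximum packet size would be programmed here
 * as devaddr = 3, epnum = 2, epdir = 1, eptype = DWC_OTG_EP_TYPE_BULK,
 * mps = 512 and lspddev = 0.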
*/ hcchar.d32 = 0; hcchar.b.devaddr = _hc->dev_addr; hcchar.b.epnum = _hc->ep_num; hcchar.b.epdir = _hc->ep_is_in; hcchar.b.lspddev = (_hc->speed == DWC_OTG_EP_SPEED_LOW); hcchar.b.eptype = _hc->ep_type; hcchar.b.mps = _hc->max_packet; dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32); DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr); DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum); DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir); DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev); DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype); DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt); /* * Program the HCSPLIT register for SPLITs */ hcsplt.d32 = 0; if (_hc->do_split) { DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", _hc->hc_num, _hc->complete_split ? "CSPLIT" : "SSPLIT"); hcsplt.b.compsplt = _hc->complete_split; hcsplt.b.xactpos = _hc->xact_pos; hcsplt.b.hubaddr = _hc->hub_addr; hcsplt.b.prtaddr = _hc->port_addr; DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", _hc->complete_split); DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", _hc->xact_pos); DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", _hc->hub_addr); DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", _hc->port_addr); DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", _hc->ep_is_in); DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", _hc->xfer_len); } dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32); } /* * Attempts to halt a host channel. This function should only be called in * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under * normal circumstances in DMA mode, the controller halts the channel when the * transfer is complete or a condition occurs that requires application * intervention. * * In slave mode, checks for a free request queue entry, then sets the Channel * Enable and Channel Disable bits of the Host Channel Characteristics * register of the specified channel to intiate the halt. If there is no free * request queue entry, sets only the Channel Disable bit of the HCCHARn * register to flush requests for this channel. In the latter case, sets a * flag to indicate that the host channel needs to be halted when a request * queue slot is open. * * In DMA mode, always sets the Channel Enable and Channel Disable bits of the * HCCHARn register. The controller ensures there is space in the request * queue before submitting the halt request. * * Some time may elapse before the core flushes any posted requests for this * host channel and halts. The Channel Halted interrupt handler completes the * deactivation of the host channel. * * @param _core_if Controller register interface. * @param _hc Host channel to halt. * @param _halt_status Reason for halting the channel. 
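 *
 * A typical call from the HCD dequeue path looks like the following sketch
 * (caller variable names are illustrative only):
 *
 *   dwc_otg_hc_halt(core_if, hc, DWC_OTG_HC_XFER_URB_DEQUEUE);
 *
 * which, as the code below shows, masks everything except the Channel
 * Halted interrupt for that channel before the halt is requested.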
 */
void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if,
                     dwc_hc_t *_hc,
                     dwc_otg_halt_status_e _halt_status)
{
#ifndef USE_INTERNAL_DMA
	gnptxsts_data_t nptxsts;
	hptxsts_data_t hptxsts;
#endif
	hcchar_data_t hcchar;
	dwc_otg_hc_regs_t *hc_regs;
	dwc_otg_core_global_regs_t *global_regs;
	dwc_otg_host_global_regs_t *host_global_regs;

	hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
	global_regs = _core_if->core_global_regs;
	host_global_regs = _core_if->host_if->host_global_regs;

	WARN_ON(_halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS);

#if defined(USE_INTERNAL_DMA) && defined(HOST_UNALIGNED_BUFFER_ADJUST)
	/* Copy data received into the aligned bounce buffer back to the caller's buffer. */
	if(_hc->using_aligned_rx_buf)
	{
		memcpy(_hc->xfer_buff,_hc->aligned_rx_buf,_hc->xfer_len);
	}
	_hc->using_aligned_rx_buf=0;
	_hc->using_aligned_tx_buf=0;
#endif

	if (_halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
	    _halt_status == DWC_OTG_HC_XFER_AHB_ERR)
	{
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		hcintmsk_data_t hcintmsk;
		hcintmsk.d32 = 0;
		hcintmsk.b.chhltd = 1;
		dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32);

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32);

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason.
		 */
		_hc->halt_status = _halt_status;

		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
		if (hcchar.b.chen == 0)
		{
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}

	if (_hc->halt_pending)
	{
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
#ifdef DEBUG
		DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n",
		          __func__, _hc->hc_num);
		/* dwc_otg_dump_global_registers(_core_if); */
		/* dwc_otg_dump_host_registers(_core_if); */
#endif
		return;
	}

	hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
	hcchar.b.chen = 1;
	hcchar.b.chdis = 1;

#ifndef USE_INTERNAL_DMA
	/* Check for space in the request queue to issue the halt.
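	 * Control and bulk channels go through the non-periodic request queue
	 * (GNPTXSTS.NPTxQSpcAvail); interrupt and isochronous channels go
	 * through the periodic request queue (HPTXSTS.PTxQSpcAvail). If no
	 * slot is free, chen is cleared again so that only chdis is written,
	 * and halt_on_queue defers the actual halt until a slot opens up.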
*/ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || _hc->ep_type == DWC_OTG_EP_TYPE_BULK) { nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts); if (nptxsts.b.nptxqspcavail == 0) hcchar.b.chen = 0; } else { hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts); if ((hptxsts.b.ptxqspcavail == 0) || (_core_if->queuing_high_bandwidth)) hcchar.b.chen = 0; } #endif dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); _hc->halt_status = _halt_status; if (hcchar.b.chen) { _hc->halt_pending = 1; _hc->halt_on_queue = 0; } else _hc->halt_on_queue = 1; DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32); DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", _hc->halt_pending); DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", _hc->halt_on_queue); DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", _hc->halt_status); return; } /* * Clears the transfer state for a host channel. This function is normally * called after a transfer is done and the host channel is being released. * * @param _core_if Programming view of DWC_otg controller. * @param _hc Identifies the host channel to clean up. */ void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) { dwc_otg_hc_regs_t *hc_regs; _hc->xfer_started = 0; /* * Clear channel interrupt enables and any unhandled channel interrupt * conditions. */ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; dwc_write_reg32(&hc_regs->hcintmsk, 0); dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF); #ifdef DEBUG del_timer(&_core_if->hc_xfer_timer[_hc->hc_num]); { hcchar_data_t hcchar; hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); if (hcchar.b.chdis) DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", __func__, _hc->hc_num, hcchar.d32); } #endif } /* * Sets the channel property that indicates in which frame a periodic transfer * should occur. This is always set to the _next_ frame. This function has no * effect on non-periodic transfers. * * @param _core_if Programming view of DWC_otg controller. * @param _hc Identifies the host channel to set up and its properties. * @param _hcchar Current value of the HCCHAR register for the specified host * channel. */ static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc, hcchar_data_t *_hcchar) { if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { hfnum_data_t hfnum; hfnum.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hfnum); /* 1 if _next_ frame is odd, 0 if it's even */ _hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 
0 : 1; #ifdef DEBUG if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR && _hc->do_split && !_hc->complete_split) { switch (hfnum.b.frnum & 0x7) { case 7: _core_if->hfnum_7_samples++; _core_if->hfnum_7_frrem_accum += hfnum.b.frrem; break; case 0: _core_if->hfnum_0_samples++; _core_if->hfnum_0_frrem_accum += hfnum.b.frrem; break; default: _core_if->hfnum_other_samples++; _core_if->hfnum_other_frrem_accum += hfnum.b.frrem; break; } } #endif } } #ifdef DEBUG #if defined(CONFIG_AMAZON_S) extern int dwc_core_num; #endif static void hc_xfer_timeout(unsigned long _ptr) { hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)_ptr; int hc_num = xfer_info->hc->hc_num; #if defined(CONFIG_AMAZON_S) DWC_WARN("%s: dwc_core_num[%d] --- timeout on channel %d\n", __func__, dwc_core_num, hc_num); #else DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num); #endif DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]); } #endif /* * This function does the setup for a data transfer for a host channel and * starts the transfer. May be called in either Slave mode or DMA mode. In * Slave mode, the caller must ensure that there is sufficient space in the * request queue and Tx Data FIFO. * * For an OUT transfer in Slave mode, it loads a data packet into the * appropriate FIFO. If necessary, additional data packets will be loaded in * the Host ISR. * * For an IN transfer in Slave mode, a data packet is requested. The data * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, * additional data packets are requested in the Host ISR. * * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ * register along with a packet count of 1 and the channel is enabled. This * causes a single PING transaction to occur. Other fields in HCTSIZ are * simply set to 0 since no data transfer occurs in this case. * * For a PING transfer in DMA mode, the HCTSIZ register is initialized with * all the information required to perform the subsequent data transfer. In * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the * controller performs the entire PING protocol, then starts the data * transfer. * * @param _core_if Programming view of DWC_otg controller. * @param _hc Information needed to initialize the host channel. The xfer_len * value may be reduced to accommodate the max widths of the XferSize and * PktCnt fields in the HCTSIZn register. The multi_count value may be changed * to reflect the final xfer_len value. */ void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) { hcchar_data_t hcchar; hctsiz_data_t hctsiz; uint16_t num_packets; uint32_t max_hc_xfer_size = _core_if->core_params->max_transfer_size; uint16_t max_hc_pkt_count = _core_if->core_params->max_packet_count; dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; hctsiz.d32 = 0; #ifdef USE_INTERNAL_DMA if (_hc->do_ping) hctsiz.b.dopng = 1; #else if (_hc->do_ping) { dwc_otg_hc_do_ping(_core_if, _hc); _hc->xfer_started = 1; return; } #endif if (_hc->do_split) { num_packets = 1; if (_hc->complete_split && !_hc->ep_is_in) _hc->xfer_len = 0; else if (_hc->ep_is_in || (_hc->xfer_len > _hc->max_packet)) _hc->xfer_len = _hc->max_packet; else if (!_hc->ep_is_in && (_hc->xfer_len > 188)) _hc->xfer_len = 188; hctsiz.b.xfersize = _hc->xfer_len; } else { /* * Ensure that the transfer length and packet count will fit * in the widths allocated for them in the HCTSIZn register. 
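 *
 * For example (illustrative numbers), with max_transfer_size = 65535 and a
 * 512-byte max packet, a larger bulk request is first clamped to
 * 65535 - 512 + 1 = 65024 bytes, which is a multiple of the packet size,
 * giving pktcnt = 127; the caller sees the reduced _hc->xfer_len and can
 * schedule the remainder later.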
*/ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { /* * Make sure the transfer size is no larger than one * (micro)frame's worth of data. (A check was done * when the periodic transfer was accepted to ensure * that a (micro)frame's worth of data can be * programmed into a channel.) */ uint32_t max_periodic_len = _hc->multi_count * _hc->max_packet; if (_hc->xfer_len > max_periodic_len) _hc->xfer_len = max_periodic_len; } else if (_hc->xfer_len > max_hc_xfer_size) /* Make sure that xfer_len is a multiple of max packet size. */ _hc->xfer_len = max_hc_xfer_size - _hc->max_packet + 1; if (_hc->xfer_len > 0) { num_packets = (_hc->xfer_len + _hc->max_packet - 1) / _hc->max_packet; if (num_packets > max_hc_pkt_count) { num_packets = max_hc_pkt_count; _hc->xfer_len = num_packets * _hc->max_packet; } } else /* Need 1 packet for transfer length of 0. */ num_packets = 1; if (_hc->ep_is_in) /* Always program an integral # of max packets for IN transfers. */ _hc->xfer_len = num_packets * _hc->max_packet; if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { /* * Make sure that the multi_count field matches the * actual transfer length. */ _hc->multi_count = num_packets; } if (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { /* Set up the initial PID for the transfer. */ if (_hc->speed == DWC_OTG_EP_SPEED_HIGH) { if (_hc->ep_is_in) { if (_hc->multi_count == 1) _hc->data_pid_start = DWC_OTG_HC_PID_DATA0; else if (_hc->multi_count == 2) _hc->data_pid_start = DWC_OTG_HC_PID_DATA1; else _hc->data_pid_start = DWC_OTG_HC_PID_DATA2; } else { if (_hc->multi_count == 1) _hc->data_pid_start = DWC_OTG_HC_PID_DATA0; else _hc->data_pid_start = DWC_OTG_HC_PID_MDATA; } } else _hc->data_pid_start = DWC_OTG_HC_PID_DATA0; } hctsiz.b.xfersize = _hc->xfer_len; } _hc->start_pkt_count = num_packets; hctsiz.b.pktcnt = num_packets; hctsiz.b.pid = _hc->data_pid_start; dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize); DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt); DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid); #ifdef USE_INTERNAL_DMA #if defined(HOST_UNALIGNED_BUFFER_ADJUST) if(!_hc->ep_is_in) { _hc->using_aligned_tx_buf=0; if (_hc->xfer_len && (((unsigned long)_hc->xfer_buff) & 3)) { if(_hc->aligned_tx_buf && _hc->aligned_tx_buf_len && _hc->aligned_tx_buf_len < _hc->xfer_len) { usb_free_buf(_hc->aligned_tx_buf); _hc->aligned_tx_buf=NULL; _hc->aligned_tx_buf_len=0; } if(! _hc->aligned_tx_buf || !_hc->aligned_tx_buf_len) { _hc->aligned_tx_buf = usb_alloc_buf(_hc->xfer_len, 0); if(_hc->aligned_tx_buf) _hc->aligned_tx_buf_len = _hc->xfer_len; } if(_hc->aligned_tx_buf && _hc->aligned_tx_buf_len >= _hc->xfer_len) { memcpy(_hc->aligned_tx_buf, _hc->xfer_buff, _hc->xfer_len); dwc_write_reg32(&hc_regs->hcdma, (uint32_t)(CPHYSADDR(_hc->aligned_tx_buf))); _hc->using_aligned_tx_buf=1; } else DWC_WARN("%s():%d\n",__func__,__LINE__); } else { if((uint32_t)(_hc->xfer_buff) != (uint32_t)(KSEG1ADDR(_hc->xfer_buff))) dma_cache_wback_inv((unsigned long) _hc->xfer_buff, _hc->xfer_len); dwc_write_reg32(&hc_regs->hcdma, (uint32_t)(CPHYSADDR(_hc->xfer_buff))); } } else { _hc->using_aligned_rx_buf=0; if (((unsigned long)_hc->xfer_buff) & 3) { if( _hc->aligned_rx_buf && _hc->aligned_rx_buf_len && _hc->xfer_len > _hc->aligned_rx_buf_len ) { usb_free_buf(_hc->aligned_rx_buf); _hc->aligned_rx_buf=NULL; _hc->aligned_rx_buf_len=0; } if(! 
_hc->aligned_rx_buf || !_hc->aligned_rx_buf_len) { _hc->aligned_rx_buf = usb_alloc_buf(_hc->xfer_len, 1); if(_hc->aligned_rx_buf) _hc->aligned_rx_buf_len = _hc->xfer_len; } if(_hc->aligned_rx_buf) { dwc_write_reg32(&hc_regs->hcdma, (uint32_t)(CPHYSADDR(_hc->aligned_rx_buf))); _hc->using_aligned_rx_buf=1; } else DWC_WARN("%s():%d\n",__func__,__LINE__); } else { // if((uint32_t)(_hc->xfer_buff) != (uint32_t)(KSEG1ADDR(_hc->xfer_buff))) // dma_cache_wback_inv((unsigned long) _hc->xfer_buff, _hc->xfer_len); dwc_write_reg32(&hc_regs->hcdma, (uint32_t)(CPHYSADDR(_hc->xfer_buff))); } } #elif defined(HOST_UNALIGNED_BUFFER_CHECK) if ( !_hc->ep_is_in && _hc->xfer_len && (((unsigned long)_hc->xfer_buff) & 3)) DWC_WARN("UNALIGNED BUFFER in REQUEST\n"); else if ( !_hc->ep_is_in && (((unsigned long)_hc->xfer_buff) & 3)) DWC_WARN("UNALIGNED BUFFER in REQUEST\n"); dwc_write_reg32(&hc_regs->hcdma, (uint32_t)(CPHYSADDR(_hc->xfer_buff))); #else dwc_write_reg32(&hc_regs->hcdma, (uint32_t)(CPHYSADDR(_hc->xfer_buff))); #endif #endif //USE_INTERNAL_DMA /* Start the split */ if (_hc->do_split) { hcsplt_data_t hcsplt; hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt); hcsplt.b.spltena = 1; dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32); } hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.b.multicnt = _hc->multi_count; hc_set_even_odd_frame(_core_if, _hc, &hcchar); #ifdef DEBUG _core_if->start_hcchar_val[_hc->hc_num] = hcchar.d32; if (hcchar.b.chdis) DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", __func__, _hc->hc_num, hcchar.d32); #endif /* Set host channel enable after all other setup is complete. */ hcchar.b.chen = 1; hcchar.b.chdis = 0; dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); _hc->xfer_started = 1; _hc->requests++; #ifndef USE_INTERNAL_DMA if (!_hc->ep_is_in && _hc->xfer_len > 0) /* Load OUT packet into the appropriate Tx FIFO. */ dwc_otg_hc_write_packet(_core_if, _hc); #endif #ifdef DEBUG /* Start a timer for this transfer. */ _core_if->hc_xfer_timer[_hc->hc_num].function = hc_xfer_timeout; _core_if->hc_xfer_info[_hc->hc_num].core_if = _core_if; _core_if->hc_xfer_info[_hc->hc_num].hc = _hc; _core_if->hc_xfer_timer[_hc->hc_num].data = (unsigned long)(&_core_if->hc_xfer_info[_hc->hc_num]); _core_if->hc_xfer_timer[_hc->hc_num].expires = jiffies + (HZ*10); add_timer(&_core_if->hc_xfer_timer[_hc->hc_num]); #endif } /* * This function continues a data transfer that was started by previous call * to dwc_otg_hc_start_transfer. The caller must ensure there is * sufficient space in the request queue and Tx Data FIFO. This function * should only be called in Slave mode. In DMA mode, the controller acts * autonomously to complete transfers programmed to a host channel. * * For an OUT transfer, a new data packet is loaded into the appropriate FIFO * if there is any data remaining to be queued. For an IN transfer, another * data packet is always requested. For the SETUP phase of a control transfer, * this function does nothing. * * @return 1 if a new request is queued, 0 if no more requests are required * for this transfer. */ int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) { DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); #ifndef USE_INTERNAL_DMA if (_hc->do_split) /* SPLITs always queue just once per channel */ return 0; else if (_hc->data_pid_start == DWC_OTG_HC_PID_SETUP) /* SETUPs are queued only once since they can't be NAKed. */ return 0; else if (_hc->ep_is_in) { /* * Always queue another request for other IN transfers. 
If * back-to-back INs are issued and NAKs are received for both, * the driver may still be processing the first NAK when the * second NAK is received. When the interrupt handler clears * the NAK interrupt for the first NAK, the second NAK will * not be seen. So we can't depend on the NAK interrupt * handler to requeue a NAKed request. Instead, IN requests * are issued each time this function is called. When the * transfer completes, the extra requests for the channel will * be flushed. */ hcchar_data_t hcchar; dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hc_set_even_odd_frame(_core_if, _hc, &hcchar); hcchar.b.chen = 1; hcchar.b.chdis = 0; DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32); dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); _hc->requests++; return 1; } else { /* OUT transfers. */ if (_hc->xfer_count < _hc->xfer_len) { if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR || _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { hcchar_data_t hcchar; dwc_otg_hc_regs_t *hc_regs; hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hc_set_even_odd_frame(_core_if, _hc, &hcchar); } /* Load OUT packet into the appropriate Tx FIFO. */ dwc_otg_hc_write_packet(_core_if, _hc); _hc->requests++; return 1; } else return 0; } #else return 0; #endif } /* * Starts a PING transfer. This function should only be called in Slave mode. * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled. */ void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) { #ifndef USE_INTERNAL_DMA hcchar_data_t hcchar; hctsiz_data_t hctsiz; dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num]; DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num); hctsiz.d32 = 0; hctsiz.b.dopng = 1; hctsiz.b.pktcnt = 1; dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); hcchar.b.chen = 1; hcchar.b.chdis = 0; dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); #endif } /* * This function writes a packet into the Tx FIFO associated with the Host * Channel. For a channel associated with a non-periodic EP, the non-periodic * Tx FIFO is written. For a channel associated with a periodic EP, the * periodic Tx FIFO is written. This function should only be called in Slave * mode. * * Upon return the xfer_buff and xfer_count fields in _hc are incremented by * then number of bytes written to the Tx FIFO. */ void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) { #ifndef USE_INTERNAL_DMA uint32_t i; uint32_t remaining_count; uint32_t byte_count; uint32_t dword_count; uint32_t *data_buff = (uint32_t *)(_hc->xfer_buff); uint32_t *data_fifo = _core_if->data_fifo[_hc->hc_num]; remaining_count = _hc->xfer_len - _hc->xfer_count; if (remaining_count > _hc->max_packet) byte_count = _hc->max_packet; else byte_count = remaining_count; dword_count = (byte_count + 3) / 4; if ((((unsigned long)data_buff) & 0x3) == 0) { /* xfer_buff is DWORD aligned. */ for (i = 0; i < dword_count; i++, data_buff++) dwc_write_reg32(data_fifo, *data_buff); } else { /* xfer_buff is not DWORD aligned. */ for (i = 0; i < dword_count; i++, data_buff++) dwc_write_reg32(data_fifo, get_unaligned(data_buff)); } _hc->xfer_count += byte_count; _hc->xfer_buff += byte_count; #endif } #endif // DWC_IS_HOST #ifdef DWC_IS_DEVICE /* * Gets the current USB frame number. This is the frame number from the last * SOF packet. 
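 *
 * DSTS.SOFFN is a 14-bit field in typical DWC_otg configurations, so the
 * returned value wraps at 0x3FFF; at high speed it advances once per 125 us
 * microframe rather than once per 1 ms frame.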
*/ uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if) { dsts_data_t dsts; dsts.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dsts); /* read current frame/microfreme number from DSTS register */ return dsts.b.soffn; } #if defined(_USB_LED_) #if 0 extern void usb_set_led_off(); extern void usb_set_led_on(); extern void usb_set_led_flash(); /** * Sets USB LED ON/OFF. */ uint32_t dwc_otg_set_usb_led(dwc_otg_core_if_t *_core_if, int is_on) { //is_on: 0, off; 1, on, 2 flash switch(is_on) { case 0: usb_set_led_off(); break; case 1: usb_set_led_on(); break; case 2: usb_set_led_flash(); break; } } #else extern void usb_set_led_off_action(); extern void usb_set_led_on_action(); extern void usb_set_led_flash_action(); /** * Sets USB LED ON/OFF. */ void dwc_otg_set_usb_led(dwc_otg_core_if_t *_core_if, int is_on) { //is_on: 0, off; 1, on, 2 flash switch(is_on) { case 0: usb_set_led_off_action(); break; case 1: usb_set_led_on_action(); break; case 2: usb_set_led_flash_action(); break; } } #endif #endif //_USB_LED_ #ifdef CONFIG_IFX_USB_LED #if defined(CONFIG_AMAZON_S) #include #include extern int dwc_core_num; /** * Sets USB LED ON/OFF. * is_on: 0, off; 1, on, 2 flash */ void dwc_otg_set_usb_led(dwc_otg_core_if_t *_core_if, int is_on){ struct led_config_param param = {0}; int bit = -1; switch(is_on){ case 0: // off case 1: // on case 2: // blink break; default: printk(KERN_ERR "Invalid led operation!!\n"); return; } if (dwc_core_num == 0) bit = 10; else bit = 7; if (is_on == 2) { param.operation_mask = CONFIG_OPERATION_BLINK; param.blink_mask = 1 << bit; param.blink = 1 << bit; bsp_led_config(¶m); } else { param.operation_mask = CONFIG_OPERATION_BLINK; param.blink_mask = 1 << bit; param.blink = 0 << bit; bsp_led_config(¶m); param.operation_mask = CONFIG_OPERATION_DATA; param.data_mask = 1 << bit; if (is_on == 1) param.data = 1 << bit; else param.data = 0 << bit; bsp_led_config(¶m); } } #endif #endif /** * This function reads a setup packet from the Rx FIFO into the destination * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl) * Interrupt routine when a SETUP packet has been received in Slave mode. * * @param _core_if Programming view of DWC_otg controller. * @param _dest Destination buffer for packet data. */ void dwc_otg_read_setup_packet(dwc_otg_core_if_t *_core_if, uint32_t *_dest) { #ifndef USE_INTERNAL_DMA /* Get the 8 bytes of a setup transaction data */ /* Pop 2 DWORDS off the receive data FIFO into memory */ _dest[0] = dwc_read_reg32(_core_if->data_fifo[0]); _dest[1] = dwc_read_reg32(_core_if->data_fifo[0]); #endif } dwc_otg_core_if_t *tx_core_if=NULL; dwc_ep_t *cin_ep=NULL; dwc_ep_t *pin_ep=NULL; dwc_ep_t *din_ep=NULL; int cin_pending=0; int pin_pending=0; int din_pending=0; int cin_holding=0; int din_holding=0; int pin_next=0; int prevep=-1; int prevnext=-1; /* * This function enables EP0 OUT to receive SETUP packets and configures EP0 * IN for transmitting packets. It is normally called when the * "Enumeration Done" interrupt occurs. * * @param _core_if Programming view of DWC_otg controller. * @param _ep The EP0 data. 
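 *
 * Note that the MPS field of DIEPCTL0 holds an encoded value rather than a
 * byte count, which is why the switch below writes DWC_DEP0CTL_MPS_64 for
 * the high/full-speed enumeration cases and DWC_DEP0CTL_MPS_8 for the 6 MHz
 * low-speed case.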
*/ void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) { dwc_otg_dev_if_t *dev_if = _core_if->dev_if; dsts_data_t dsts; depctl_data_t diepctl; depctl_data_t doepctl; dctl_data_t dctl ={.d32=0}; /* Read the Device Status and Endpoint 0 Control registers */ dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts); diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl); doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl); /* Set the MPS of the IN EP based on the enumeration speed */ switch (dsts.b.enumspd) { case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: diepctl.b.mps = DWC_DEP0CTL_MPS_64; break; case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: diepctl.b.mps = DWC_DEP0CTL_MPS_8; break; } dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32); /* Enable OUT EP for receive */ doepctl.b.epena = 1; dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); #ifdef VERBOSE DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); #endif dctl.b.cgnpinnak = 1; dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n", dwc_read_reg32(&dev_if->dev_global_regs->dctl)); cin_ep=_ep; tx_core_if=_core_if; } /* * This function activates an EP. The Device EP control register for * the EP is configured as defined in the ep structure. Note: This * function is not used for EP0. * * @param _core_if Programming view of DWC_otg controller. * @param _ep The EP to activate. */ void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) { dwc_otg_dev_if_t *dev_if = _core_if->dev_if; depctl_data_t depctl; volatile uint32_t *addr; daint_data_t daintmsk ={.d32=0}; DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, _ep->num, (_ep->is_in?"IN":"OUT")); /* Read DEPCTLn register */ if (_ep->is_in == 1) { addr = &dev_if->in_ep_regs[_ep->num]->diepctl; daintmsk.ep.in = 1<<_ep->num; } else { addr = &dev_if->out_ep_regs[_ep->num]->doepctl; daintmsk.ep.out = 1<<_ep->num; } /* If the EP is already active don't change the EP Control register. */ depctl.d32 = dwc_read_reg32(addr); if (!depctl.b.usbactep) { depctl.b.mps = _ep->maxpacket; depctl.b.eptype = _ep->type; depctl.b.txfnum = _ep->tx_fifo_num; if (_ep->type != DWC_OTG_EP_TYPE_ISOC) depctl.b.setd0pid = 1; depctl.b.usbactep = 1; if(depctl.b.epena) depctl.b.epdis=1; else depctl.b.epdis=0; depctl.b.snak = 1; depctl.b.cnak = 0; dwc_write_reg32(addr, depctl.d32); DWC_DEBUGPL(DBG_PCDV,"DEPCTL=%08x\n", dwc_read_reg32(addr)); } /* Enable the Interrupt for this EP */ dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk, 0, daintmsk.d32); DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n", dwc_read_reg32(&dev_if->dev_global_regs->daintmsk)); if (_ep->is_in == 1 && _ep->type == DWC_OTG_EP_TYPE_BULK) din_ep=_ep; if (_ep->is_in == 1 && _ep->type == DWC_OTG_EP_TYPE_INTR) pin_ep=_ep; tx_core_if=_core_if; return; } /* * This function deactivates an EP. This is done by clearing the USB Active * EP bit in the Device EP control register. Note: This function is not used * for EP0. EP0 cannot be deactivated. * * @param _core_if Programming view of DWC_otg controller. * @param _ep The EP to deactivate. 
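 *
 * The file-scope helpers that follow (set_din_holding() and friends) manage
 * the cin_ep/din_ep/pin_ep pointers declared above: cin_ep is the control
 * IN endpoint, din_ep the bulk IN endpoint and pin_ep the interrupt IN
 * endpoint. The *_pending and *_holding flags appear to serialize their
 * transfers on the shared non-periodic Tx FIFO, using the DCTL global IN
 * NAK (sgnpinnak) together with the GINNakEff interrupt to stop an
 * in-progress bulk IN before a control IN is (re)started.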
*/ void set_din_holding() { depctl_data_t depctl; dwc_otg_pcd_t *pcd; dwc_otg_pcd_ep_t *pcd_ep; pcd_ep=container_of(cin_ep,dwc_otg_pcd_ep_t,dwc_ep); pcd=pcd_ep->pcd; din_holding=1; if(din_ep && !din_pending ) // when there is none pending, check if current running { depctl.d32 = dwc_read_reg32 (&tx_core_if->dev_if->in_ep_regs[din_ep->num]->diepctl); if( depctl.b.usbactep && depctl.b.epena // ready or already start ) { dctl_data_t dctl; gintmsk_data_t intr_mask = {.d32 = 0}; cin_holding=1; din_pending = 1; DWC_DEBUGPL(DBG_PCDV, "STOPING DIN\n"); pcd->gnaksource=1; pcd->gnaknext=0; dctl.d32=0; dctl.b.sgnpinnak = 1; dwc_modify_reg32(&tx_core_if->dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); intr_mask.b.ginnakeff = 1; dwc_modify_reg32( &tx_core_if->core_global_regs->gintmsk, 0, intr_mask.d32); // We only send a gnak, then the disable will be issued by nakeff return; } } } void clr_din_holding() { din_holding=0; } void set_cin_holding() { cin_holding=1; } void clr_cin_holding() { cin_holding=0; } void set_pin_next() { pin_next=1; } void clr_pin_next() { pin_next=0; } void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) { depctl_data_t depctl ={.d32 = 0}; volatile uint32_t *addr; daint_data_t daintmsk ={.d32=0}; if(_ep==pin_ep) { pin_ep=NULL; pin_next=0; pin_pending=0; } if(_ep==cin_ep) { cin_ep=NULL; cin_pending=0; cin_holding=0; } if(_ep==din_ep) { din_ep=NULL; din_pending=0; din_holding=0; } /* Read DEPCTLn register */ if (_ep->is_in == 1) { addr = &_core_if->dev_if->in_ep_regs[_ep->num]->diepctl; daintmsk.ep.in = 1<<_ep->num; } else { addr = &_core_if->dev_if->out_ep_regs[_ep->num]->doepctl; daintmsk.ep.out = 1<<_ep->num; } depctl.b.usbactep = 0; dwc_write_reg32(addr, depctl.d32); /* Disable the Interrupt for this EP */ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->daintmsk, daintmsk.d32, 0); return; } void dump_msg(const u8 *buf, unsigned int length); #ifdef CONFIG_AMAZON_SE #include #endif #ifdef USE_PERIODIC_EP void dwc_otg_ep_stop_perio() { if(pin_ep) { depctl_data_t depctl; dwc_otg_dev_in_ep_regs_t *in_regs = tx_core_if->dev_if->in_ep_regs[pin_ep->num]; deptsiz_data_t sz; depctl.d32 = dwc_read_reg32(&in_regs->diepctl); if(depctl.b.epena) { unsigned long count; diepint_data_t it; dwc_write_reg32 (&in_regs->dieptsiz,0); count=0; depctl.d32=0; depctl.b.snak = 1; dwc_modify_reg32(&in_regs->diepctl,0,depctl.d32); do{ mdelay(1); count++; it.d32=dwc_read_reg32(&in_regs->diepint); }while(it.b.inepnakeff == 0 && count < 1000); count=0; depctl.d32=0; depctl.b.snak = 1; depctl.b.epdis = 1; dwc_modify_reg32(&in_regs->diepctl,0,depctl.d32); do{ mdelay(1); count++; depctl.d32 = dwc_read_reg32 (&in_regs->diepctl); }while(depctl.b.epdis == 1 && count < 1000); dwc_otg_flush_tx_fifo( tx_core_if,pin_ep->tx_fifo_num); } } } #endif int restart_din(void) { if (din_ep && din_pending && !din_holding) { #ifdef DEBUG { depctl_data_t depctl; depctl.d32 = dwc_read_reg32(&tx_core_if->dev_if->in_ep_regs[cin_ep->num]->diepctl); if(depctl.b.nextep!=din_ep->num) DWC_DEBUGPL(DBG_PCDV, "%s() WARNING CIN-NEXT IS NOT DIN\n",__func__); } #endif din_pending=0; if(din_ep->xfer_len && din_ep->xfer_len >din_ep->xfer_count) { //printk(KERN_INFO "%s() Size=%d\n",__func__,din_ep->xfer_len); dwc_otg_ep_start_transfer(tx_core_if, din_ep); return 1; } } return 0; } int restart_cin(void) { if (cin_ep && cin_pending) { cin_pending=0; dwc_otg_ep_start_transfer(tx_core_if, cin_ep); return 1; } return 0; } int restart_pin(void) { if (pin_ep && pin_pending) { pin_pending=0; 
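		/* Re-issue the deferred periodic IN transfer now that the
		 * condition that blocked it has cleared. */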
dwc_otg_ep_start_transfer(tx_core_if, pin_ep); return 1; } return 0; } void dwc_otg_jump_ep(dwc_otg_core_if_t *_core_if, int from, uint8_t epnum) { depctl_data_t ctl; int i; if(from<0) { for (i=0; i < _core_if->dev_if->num_eps; i++) { ctl.d32 = dwc_read_reg32 (&_core_if->dev_if->in_ep_regs[i]->diepctl); if(ctl.b.usbactep && ctl.b.txfnum==0) { ctl.b.nextep = epnum; ctl.b.snak = 0; ctl.b.cnak = 0; ctl.b.epena = 0; ctl.b.epdis = 0; dwc_write_reg32 (&_core_if->dev_if->in_ep_regs[i]->diepctl,ctl.d32); } } } else { ctl.d32 = dwc_read_reg32 (&_core_if->dev_if->in_ep_regs[from]->diepctl); ctl.b.nextep = epnum; ctl.b.snak = 0; ctl.b.cnak = 0; ctl.b.epena = 0; ctl.b.epdis = 0; dwc_write_reg32 (&_core_if->dev_if->in_ep_regs[from]->diepctl,ctl.d32); } } void dwc_otg_ep_start_tx_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, signed char next) { depctl_data_t depctl; deptsiz_data_t deptsiz; deptsiz0_data_t deptsiz0; // gintmsk_data_t intr_mask ={ .d32 = 0}; uint32_t num_packets=0; // diepmsk_data_t diepmsk; dwc_otg_pcd_t *pcd; dwc_otg_pcd_ep_t *pcd_ep; dwc_otg_dev_in_ep_regs_t *in_regs = _core_if->dev_if->in_ep_regs[_ep->num]; gnptxsts_data_t txstatus; pcd_ep=container_of(cin_ep,dwc_otg_pcd_ep_t,dwc_ep); pcd=pcd_ep->pcd; #ifdef CHECK_PACKET_COUNTER_WIDTH const uint32_t MAX_XFER_SIZE = _core_if->core_params->max_transfer_size; const uint32_t MAX_PKT_COUNT = _core_if->core_params->max_packet_count; #endif DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_cnt=%d/%d " "xfer_buff=%p/%p\n", _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_count, _ep->xfer_len, _ep->xfer_buff, _ep->start_xfer_buff); ////////////////////////////////////////////////////// DWC_DEBUGPL(DBG_PCD, "[T%d] Start",_ep->num); if (_ep->type == DWC_OTG_EP_TYPE_CONTROL ) //CTRL { if(cin_holding) { cin_pending=1; return; } dwc_otg_jump_ep(_core_if, 1, _ep->num); } else if (_ep->type == DWC_OTG_EP_TYPE_INTR ) //Perio { #ifdef USE_PERIODIC_EP dwc_otg_ep_stop_perio(); #else dwc_otg_jump_ep(_core_if, 0, _ep->num); #endif } else //Non-Perio { if(din_pending || din_holding) // CTRL stop or prevent the DIN { //if(_ep->num==1) printk(KERN_INFO "[T] len=%d Holding %s %s\n",_ep->xfer_len,(din_pending)?"pend":"",(din_holding)?"hold":""); din_pending = 1; // We need to get back even there is none previously return; } depctl.d32 = dwc_read_reg32 (&_core_if->dev_if->in_ep_regs[cin_ep->num]->diepctl); if( depctl.b.epena ) { //if(_ep->num==1) printk(KERN_INFO "[T] len=%d ENABLED\n",_ep->xfer_len); din_pending = 1; // We need to get back even there is none previously return; } dwc_otg_jump_ep(_core_if, 0, _ep->num); } ////////////////////////////////////////////////////// _ep->len_in_xfer=0; if(_ep->xfer_len>0) _ep->len_in_xfer = _ep->xfer_len - _ep->xfer_count; if (_ep->num == 0 || _ep->tx_fifo_num ) { if(_ep->len_in_xfer > _ep->maxpacket) _ep->len_in_xfer = _ep->maxpacket; } #ifdef CHECK_PACKET_COUNTER_WIDTH else //Non-Perio { if(_ep->len_in_xfer > MAX_XFER_SIZE) _ep->len_in_xfer = MAX_XFER_SIZE; } #endif num_packets = (_ep->len_in_xfer + _ep->maxpacket - 1) / _ep->maxpacket; if(num_packets==0) num_packets = 1; #ifdef CHECK_PACKET_COUNTER_WIDTH if (num_packets > MAX_PKT_COUNT) { num_packets = MAX_PKT_COUNT; _ep->len_in_xfer = num_packets * _ep->maxpacket; } #endif _ep->packet_in_xfer = num_packets; if(_ep->tx_fifo_num == 0) //ep using fifo 0 { txstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts); #if 0 // method 1: all tx should wait for FIFO empty if (txstatus.b.nptxqspcavail != 
_core_if->hwcfg2.b.nonperio_tx_q_depth) { #if 1 // Local wait unsigned long countdown=10; while (txstatus.b.nptxqspcavail != _core_if->hwcfg2.b.nonperio_tx_q_depth && countdown >0) { mdelay(1); txstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts); countdown--; } if(!countdown) { DWC_PRINT("TX Queue Not Empty (0x%0x)\n", txstatus.d32); if(pin_ep && _ep==pin_ep){ DWC_PRINT("TX Queue Not Empty PIN\n"); pin_pending=1; } if(cin_ep && _ep==cin_ep){ DWC_PRINT("TX Queue Not Empty CIN\n"); cin_pending=1; } //if(_ep->num==1) printk(KERN_INFO "[T] len=%d QUEUE FULL\n",_ep->xfer_len); return; } #else return; #endif } #endif #if 1 // method 2: all tx should wait for FIFO not full if (txstatus.b.nptxqspcavail == 0) { #if 1 // Local wait unsigned long countdown=1000; // 090323 HOWARD: IMPROVE THROUGHPUT while (txstatus.b.nptxqspcavail == 0 && countdown >0) { udelay(1); // 090323 HOWARD: IMPROVE THROUGHPUT txstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts); countdown--; } if(!countdown) { DWC_PRINT("TX Queue Full (0x%0x)\n", txstatus.d32); return; } #else return; #endif } #endif } else dwc_otg_flush_tx_fifo( _core_if,_ep->tx_fifo_num ); DWC_DEBUGPL(DBG_PCD, "transfer_len=%d #pckt=%d\n", _ep->xfer_len, num_packets); #ifdef USE_PERIODIC_EP if(_ep->tx_fifo_num && !num_packets ) return; #endif if (_ep->num == 0 ) //CTRL { deptsiz0.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); deptsiz0.b.xfersize = _ep->len_in_xfer; deptsiz0.b.pktcnt = num_packets; dwc_write_reg32(&in_regs->dieptsiz, deptsiz0.d32); } else { deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); deptsiz.b.xfersize = _ep->len_in_xfer; deptsiz.b.pktcnt = num_packets; if (_ep->len_in_xfer == 0) deptsiz.b.mc=0; else if(num_packets<4) deptsiz.b.mc=num_packets; else deptsiz.b.mc=3; dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); } #ifdef USE_INTERNAL_DMA /* Write the DMA register */ if (deptsiz.b.xfersize > 0) { if((uint32_t)(_ep->xfer_buff) & 3) printk(KERN_INFO "%s WARNING (IN) UNALIGNED DMA :%08x len:%x\n",__func__,(uint32_t)(_ep->xfer_buff),_ep->xfer_len); if(_ep->xfer_buff && _ep->xfer_buff==(uint8_t *)(KSEG0ADDR(_ep->xfer_buff))) dma_cache_wback_inv((unsigned long) _ep->xfer_buff, deptsiz.b.xfersize); dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)(CPHYSADDR(_ep->xfer_buff))); } #endif //if(_ep->num==1) printk(KERN_INFO "[T] DO len=%d pckt=%d\n",deptsiz.b.xfersize, num_packets); ////////////////////////////////////////////////////// depctl.d32 = dwc_read_reg32(&in_regs->diepctl); if(next>=0) depctl.b.nextep=next; else { if (_ep->type == DWC_OTG_EP_TYPE_CONTROL ) //CTRL { if(_ep->xfer_count) depctl.b.setd1pid=1; if(pin_next) { depctl.b.nextep=pin_ep->num; pin_next=0; } else if(din_ep && (_ep->xfer_count+_ep->maxpacket)>= _ep->xfer_len && !din_holding ) depctl.b.nextep=din_ep->num; else depctl.b.nextep=0; } else if (_ep->type == DWC_OTG_EP_TYPE_INTR ) //Perio depctl.b.nextep=0; else //Non-Perio depctl.b.nextep=_ep->num; } if (_ep->tx_fifo_num ==0){ prevep=_ep->num; prevnext=depctl.b.nextep; } /* EP enable, IN data in FIFO */ depctl.b.snak = 0; depctl.b.cnak = 1; depctl.b.epena = 1; depctl.b.epdis = 0; dwc_write_reg32(&in_regs->diepctl, depctl.d32); ////////////////////////////////////////////////////// #ifdef USE_INTERNAL_DMA #ifdef USE_PERIODIC_EP if(_ep->tx_fifo_num ) { intr_mask.b.eopframe = 1; dwc_modify_reg32( &_core_if->core_global_regs->gintmsk,0, intr_mask.d32 ); } #endif #else /* * Enable the Non-Periodic Tx FIFO empty interrupt, * the data will be written into the fifo by the ISR. 
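 * (Slave-mode path: the pending nptxfempty status is cleared in GINTSTS
 * and the interrupt is unmasked in GINTMSK so the ISR can push the
 * packet data for this transfer.)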
*/ if(_ep->tx_fifo_num == 0) //ep using fifo 0 { intr_mask.b.nptxfempty = 1; dwc_modify_reg32( &_core_if->core_global_regs->gintsts, intr_mask.d32, 0); dwc_modify_reg32( &_core_if->core_global_regs->gintmsk, intr_mask.d32, intr_mask.d32); } #endif } void dwc_otg_ep_start_rx_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) { depctl_data_t depctl; deptsiz_data_t deptsiz; deptsiz0_data_t deptsiz0; // dctl_data_t dctl; // gintmsk_data_t intr_mask ={ .d32 = 0}; uint32_t num_packets=0; // doepmsk_data_t doepmsk; dwc_otg_dev_out_ep_regs_t *out_regs = _core_if->dev_if->out_ep_regs[_ep->num]; #ifdef CHECK_PACKET_COUNTER_WIDTH const uint32_t MAX_XFER_SIZE = _core_if->core_params->max_transfer_size; const uint32_t MAX_PKT_COUNT = _core_if->core_params->max_packet_count; #endif DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_cnt=%d/%d " "xfer_buff=%p/%p\n", _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_count, _ep->xfer_len, _ep->xfer_buff, _ep->start_xfer_buff); if(_ep->num && _ep->xfer_len>0) { int sz; sz = _ep->xfer_len - _ep->xfer_count; #ifdef CHECK_PACKET_COUNTER_WIDTH if (sz > MAX_XFER_SIZE) sz = MAX_XFER_SIZE; #endif num_packets = (sz + _ep->maxpacket - 1) / _ep->maxpacket; #ifdef CHECK_PACKET_COUNTER_WIDTH if (num_packets > MAX_XFER_SIZE) num_packets = MAX_PKT_COUNT; #endif } if(num_packets==0) num_packets = 1; if(_ep->num==0) num_packets = 1; _ep->len_in_xfer = num_packets * _ep->maxpacket; _ep->packet_in_xfer = num_packets; if(_ep->num==0) { deptsiz0.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); deptsiz0.b.supcnt = 3; //howard deptsiz0.b.xfersize = _ep->len_in_xfer; deptsiz0.b.pktcnt = num_packets; dwc_write_reg32(&out_regs->doeptsiz, deptsiz0.d32); //printk(KERN_INFO "%s() doeptsiz0= %08x\n",__func__,dwc_read_reg32(&out_regs->doeptsiz)); } else { deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); deptsiz.b.xfersize = _ep->len_in_xfer; deptsiz.b.pktcnt = num_packets; dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); } /* Write the DMA register */ #ifdef USE_INTERNAL_DMA if((uint32_t)(_ep->xfer_buff) & 3) printk(KERN_INFO "%s WARNING (OUT) UNALIGNED DMA :%08x\n",__func__,(uint32_t)(_ep->xfer_buff)); if(_ep->xfer_buff && _ep->xfer_buff==(uint8_t *)(KSEG0ADDR(_ep->xfer_buff))) dma_cache_wback_inv((unsigned long) _ep->xfer_buff, deptsiz.b.xfersize); dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)(CPHYSADDR(_ep->xfer_buff))); #endif { doepint_data_t doepint = {.d32=0}; doepint.b.outtknepdis = 1; dwc_write_reg32(&out_regs->doepint, doepint.d32); } depctl.d32 = dwc_read_reg32(&out_regs->doepctl); if (_ep->type == DWC_OTG_EP_TYPE_ISOC) depctl.b.dpid = _ep->even_odd_frame; depctl.b.snak = 0; depctl.b.cnak = 1; depctl.b.epena = 1; depctl.b.epdis = 0; dwc_write_reg32(&out_regs->doepctl, depctl.d32); } void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep) { DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_cnt=%d/%d " "xfer_buff=%p/%p\n", _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_count, _ep->xfer_len, _ep->xfer_buff, _ep->start_xfer_buff); /* IN endpoint */ if (_ep->is_in == 1) dwc_otg_ep_start_tx_transfer(_core_if, _ep,-1); else dwc_otg_ep_start_rx_transfer(_core_if, _ep); } #endif // DWC_IS_DEVICE #ifdef DEBUG void dump_msg(const u8 *buf, unsigned int length) { unsigned int start, num, i; char line[52], *p; if (length >= 512) return; start = 0; while (length > 0) { num = min(length, 16u); p = line; for (i = 0; i < num; ++i) { if (i == 8) *p++ = ' '; sprintf(p, " %02x", buf[i]); p += 3; } 
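		/* NUL-terminate the assembled hex line before printing it. */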
		*p = 0;
		DWC_PRINT( "%6x: %s\n", start, line);
		buf += num;
		start += num;
		length -= num;
	}
}
#else
inline void dump_msg(const u8 *buf, unsigned int length) { }
#endif

/*
 * This function writes a packet into the Tx FIFO associated with the
 * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For
 * periodic EPs the periodic Tx FIFO associated with the EP is written
 * with all packets for the next micro-frame.
 *
 * @param _core_if Programming view of DWC_otg controller.
 * @param _ep The EP to write packet for.
 */
#ifndef USE_INTERNAL_DMA
void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
{
	/*
	 * The buffer is padded to DWORD on a per packet basis in
	 * slave/dma mode if the MPS is not DWORD aligned. The last
	 * packet, if short, is also padded to a multiple of DWORD.
	 *
	 * ep->xfer_buff always starts DWORD aligned in memory and is a
	 * multiple of DWORD in length
	 *
	 * ep->xfer_len can be any number of bytes
	 *
	 * ep->xfer_count is a multiple of ep->maxpacket until the last
	 * packet
	 *
	 * FIFO access is DWORD
	 */
	uint32_t i;
	uint32_t byte_count;
	uint32_t dword_count;
	uint32_t *fifo;
	uint32_t *data_buff = (uint32_t *)_ep->xfer_buff;

	//DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, _core_if, _ep);

	if (_ep->xfer_count >= _ep->xfer_len)
	{
		DWC_WARN("%s() No data for EP%d!!!\n", __func__, _ep->num);
		return;
	}

	/* Find the byte length of the packet either short packet or MPS */
	if ((_ep->xfer_len - _ep->xfer_count) < _ep->maxpacket)
		byte_count = _ep->xfer_len - _ep->xfer_count;
	else
		byte_count = _ep->maxpacket;

	/* Find the DWORD length, padded by extra bytes as necessary if MPS
	 * is not a multiple of DWORD */
	dword_count = (byte_count + 3) / 4;

#ifdef VERBOSE
	dump_msg(_ep->xfer_buff, byte_count);
#endif
	if (_ep->type == DWC_OTG_EP_TYPE_ISOC)
		fifo = _core_if->data_fifo[_ep->tx_fifo_num];
	else
		fifo = _core_if->data_fifo[_ep->num];

	DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count);

	/* Push the packet into the Tx FIFO one DWORD at a time */
	for (i=0; i<dword_count; i++, data_buff++)
	{
		dwc_write_reg32( fifo, *data_buff );
	}

	_ep->xfer_count += byte_count;
	_ep->xfer_buff += byte_count;
}
#endif

#ifdef DWC_IS_DEVICE
/*
 * Set the EP STALL.
 *
 * @param _core_if Programming view of DWC_otg controller.
 * @param _ep The EP to set the stall on.
 */
void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
{
	depctl_data_t depctl;
	volatile uint32_t *depctl_addr;

	DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num, (_ep->is_in?"IN":"OUT"));

	if (_ep->is_in == 1)
	{
		depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl);
		depctl.d32 = dwc_read_reg32(depctl_addr);

		/* set the disable and stall bits */
		if (depctl.b.epena)
			depctl.b.epdis = 1;
		depctl.b.stall = 1;
		dwc_write_reg32(depctl_addr, depctl.d32);
	}
	else
	{
		depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl);
		depctl.d32 = dwc_read_reg32(depctl_addr);

		/* set the stall bit */
		depctl.b.stall = 1;
		dwc_write_reg32(depctl_addr, depctl.d32);
	}
	DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr));
	return;
}

/*
 * Clear the EP STALL.
 *
 * @param _core_if Programming view of DWC_otg controller.
 * @param _ep The EP to clear stall from.
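 *
 * For interrupt and bulk endpoints the data toggle is also reset to
 * DATA0, as USB 2.0 section 9.4.5 requires for ClearFeature(ENDPOINT_HALT).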
 */
void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
{
	depctl_data_t depctl;
	volatile uint32_t *depctl_addr;

	DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num, (_ep->is_in?"IN":"OUT"));

	if (_ep->is_in == 1)
		depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl);
	else
		depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl);

	depctl.d32 = dwc_read_reg32(depctl_addr);

	/* clear the stall bits */
	depctl.b.stall = 0;

	/*
	 * USB Spec 9.4.5: For endpoints using data toggle, regardless
	 * of whether an endpoint has the Halt feature set, a
	 * ClearFeature(ENDPOINT_HALT) request always results in the
	 * data toggle being reinitialized to DATA0.
	 */
	if (_ep->type == DWC_OTG_EP_TYPE_INTR || _ep->type == DWC_OTG_EP_TYPE_BULK)
		depctl.b.setd0pid = 1; /* DATA0 */

	dwc_write_reg32(depctl_addr, depctl.d32);
	DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr));
	return;
}
#endif

/*
 * This function reads a packet from the Rx FIFO into the destination
 * buffer. To read SETUP data use dwc_otg_read_setup_packet.
 *
 * @param _core_if Programming view of DWC_otg controller.
 * @param _dest Destination buffer for the packet.
 * @param _bytes Number of bytes to copy to the destination.
 */
#ifndef USE_INTERNAL_DMA
void dwc_otg_read_packet(dwc_otg_core_if_t *_core_if, uint8_t *_dest, uint16_t _bytes)
{
	int i;
	int word_count = (_bytes + 3) / 4;
	volatile uint32_t *fifo = _core_if->data_fifo[0];
	uint32_t *data_buff = (uint32_t *)_dest;

	DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__, _core_if, _dest, _bytes);

	/* Pop the packet off the Rx FIFO one DWORD at a time */
	for (i=0; i<word_count; i++, data_buff++)
		*data_buff = dwc_read_reg32(fifo);

	return;
}
#endif

#ifdef DWC_IS_DEVICE
/*
 * This function reads the device registers and prints them.
 *
 * @param _core_if Programming view of DWC_otg controller.
 */
void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if)
{
	int i;
	volatile uint32_t *addr;

	DWC_PRINT("Device Global Registers\n");
	addr=&_core_if->dev_if->dev_global_regs->dcfg;
	DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	addr=&_core_if->dev_if->dev_global_regs->dctl;
	DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	addr=&_core_if->dev_if->dev_global_regs->dsts;
	DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	addr=&_core_if->dev_if->dev_global_regs->diepmsk;
	DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	addr=&_core_if->dev_if->dev_global_regs->doepmsk;
	DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	addr=&_core_if->dev_if->dev_global_regs->daint;
	DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	addr=&_core_if->dev_if->dev_global_regs->daintmsk;
	DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	addr=&_core_if->dev_if->dev_global_regs->dtknqr1;
	DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	if (_core_if->hwcfg2.b.dev_token_q_depth > 6)
	{
		addr=&_core_if->dev_if->dev_global_regs->dtknqr2;
		DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n", (uint32_t)addr,dwc_read_reg32(addr));
	}
	addr=&_core_if->dev_if->dev_global_regs->dvbusdis;
	DWC_PRINT("DVBUSDIS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
	addr=&_core_if->dev_if->dev_global_regs->dvbuspulse;
	DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n", (uint32_t)addr,dwc_read_reg32(addr));
	if (_core_if->hwcfg2.b.dev_token_q_depth > 14)
	{
		addr=&_core_if->dev_if->dev_global_regs->dtknqr3;
		DWC_PRINT("DTKNQR3 @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr));
	}
	if (_core_if->hwcfg2.b.dev_token_q_depth > 22)
	{
		addr=&_core_if->dev_if->dev_global_regs->dtknqr4;
		DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr));
	}
	for (i=0; i< _core_if->dev_if->num_eps; i++)
	{
		DWC_PRINT("Device IN EP %d Registers\n", i);
		addr=&_core_if->dev_if->in_ep_regs[i]->diepctl;
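		/* Dump the IN EP registers (DIEPCTL/DIEPINT/DIEPTSIZ/DIEPDMA)
		 * followed by the matching OUT EP registers for this EP. */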
DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->dev_if->in_ep_regs[i]->diepint; DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->dev_if->in_ep_regs[i]->dieptsiz; DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->dev_if->in_ep_regs[i]->diepdma; DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); DWC_PRINT("Device OUT EP %d Registers\n", i); addr=&_core_if->dev_if->out_ep_regs[i]->doepctl; DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->dev_if->out_ep_regs[i]->doepfn; DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->dev_if->out_ep_regs[i]->doepint; DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->dev_if->out_ep_regs[i]->doeptsiz; DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->dev_if->out_ep_regs[i]->doepdma; DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); } return; } #endif #ifdef DWC_IS_HOST /* * This function reads the host registers and prints them * * @param _core_if Programming view of DWC_otg controller. */ void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if) { int i; volatile uint32_t *addr; DWC_PRINT("Host Global Registers\n"); addr=&_core_if->host_if->host_global_regs->hcfg; DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->host_global_regs->hfir; DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->host_global_regs->hfnum; DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->host_global_regs->hptxsts; DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->host_global_regs->haint; DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->host_global_regs->haintmsk; DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=_core_if->host_if->hprt0; DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); for (i=0; i<_core_if->core_params->host_channels; i++) { DWC_PRINT("Host Channel %d Specific Registers\n", i); addr=&_core_if->host_if->hc_regs[i]->hcchar; DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->hc_regs[i]->hcsplt; DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->hc_regs[i]->hcint; DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->hc_regs[i]->hcintmsk; DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->hc_regs[i]->hctsiz; DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->host_if->hc_regs[i]->hcdma; DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); } return; } #endif /* * This function reads the core global registers and prints them * * @param _core_if Programming view of DWC_otg controller. 
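 *
 * GRXSTSP is left commented out below because reading it pops an entry
 * from the receive status queue; GRXSTSR is dumped instead.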
*/ void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if) { int i; volatile uint32_t *addr; DWC_PRINT("Core Global Registers\n"); addr=&_core_if->core_global_regs->gotgctl; DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gotgint; DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gahbcfg; DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gusbcfg; DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->grstctl; DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gintsts; DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gintmsk; DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->grxstsr; DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); //addr=&_core_if->core_global_regs->grxstsp; //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->grxfsiz; DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gnptxfsiz; DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gnptxsts; DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gi2cctl; DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gpvndctl; DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->ggpio; DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->guid; DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->gsnpsid; DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->ghwcfg1; DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->ghwcfg2; DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->ghwcfg3; DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->ghwcfg4; DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); addr=&_core_if->core_global_regs->hptxfsiz; DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); for (i=0; i< 5; i++) { addr=&_core_if->core_global_regs->dptxfsiz[i]; DWC_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n",i,(uint32_t)addr,dwc_read_reg32(addr)); } } //#endif /* * Flush a Tx FIFO. * * @param _core_if Programming view of DWC_otg controller. * @param _num Tx FIFO to flush. 
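 *
 * A _num value of 0x10 requests a flush of all Tx FIFOs (GRSTCTL.TxFNum
 * encoding); the hang warning below is only raised for FIFO 0 and 0x10.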
*/ extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if, const int _num ) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; volatile grstctl_t greset ={ .d32 = 0}; int count = 0; DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", _num); greset.b.intknqflsh=1; greset.b.txfflsh = 1; greset.b.txfnum = _num; dwc_write_reg32( &global_regs->grstctl, greset.d32 ); do { greset.d32 = dwc_read_reg32( &global_regs->grstctl); if (++count > 10000&&(_num==0 ||_num==0x10)) { DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", __func__, greset.d32, dwc_read_reg32( &global_regs->gnptxsts)); break; } } while (greset.b.txfflsh == 1); /* Wait for 3 PHY Clocks*/ UDELAY(1); } /* * Flush Rx FIFO. * * @param _core_if Programming view of DWC_otg controller. */ extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if ) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; volatile grstctl_t greset ={ .d32 = 0}; int count = 0; DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__); greset.b.rxfflsh = 1; dwc_write_reg32( &global_regs->grstctl, greset.d32 ); do { greset.d32 = dwc_read_reg32( &global_regs->grstctl); if (++count > 10000) { DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, greset.d32); break; } } while (greset.b.rxfflsh == 1); /* Wait for 3 PHY Clocks*/ UDELAY(1); } /* * Do core a soft reset of the core. Be careful with this because it * resets all the internal state machines of the core. */ int dwc_otg_core_reset(dwc_otg_core_if_t *_core_if) { dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs; volatile grstctl_t greset ={ .d32 = 0}; int count = 0; DWC_DEBUGPL(DBG_CILV, "%s\n", __func__); /* Wait for AHB master IDLE state. */ do { UDELAY(10); greset.d32 = dwc_read_reg32( &global_regs->grstctl); if (++count > 100000) { DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x %x\n", __func__, greset.d32, greset.b.ahbidle); break; } } while (greset.b.ahbidle == 0); #ifdef WITH_REBOOT_DELAY MDELAY(WITH_REBOOT_DELAY); #endif /* Core Soft Reset */ count = 0; greset.b.csftrst = 1; dwc_write_reg32( &global_regs->grstctl, greset.d32 ); #ifdef REBOOT_DELAY MDELAY(REBOOT_DELAY); #endif do { greset.d32 = dwc_read_reg32( &global_regs->grstctl); if (++count > 10000) { DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, greset.d32); return -1; } } while (greset.b.csftrst == 1); /* Wait for 3 PHY Clocks*/ //DWC_PRINT("100ms\n"); #ifdef REBOOT_DELAY MDELAY(REBOOT_DELAY); #endif return 0; } /* * Register HCD callbacks. The callbacks are used to start and stop * the HCD for interrupt processing. * * @param _core_if Programming view of DWC_otg controller. * @param _cb the HCD callback structure. * @param _p pointer to be passed to callback function (usb_hcd*). */ #ifdef DWC_IS_HOST extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if, dwc_otg_cil_callbacks_t *_cb, void *_p) { _core_if->hcd_cb = _cb; _cb->p = _p; } #endif /* * Register PCD callbacks. The callbacks are used to start and stop * the PCD for interrupt processing. * * @param _core_if Programming view of DWC_otg controller. * @param _cb the PCD callback structure. * @param _p pointer to be passed to callback function (pcd*). */ #ifdef DWC_IS_DEVICE extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if, dwc_otg_cil_callbacks_t *_cb, void *_p) { _core_if->pcd_cb = _cb; _cb->p = _p; } #endif
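
/*
 * Usage sketch (illustrative only, never compiled): how a PCD might hand
 * its context to the CIL and start a simple IN transfer. Only members of
 * dwc_otg_cil_callbacks_t and dwc_ep_t that appear in this file are used;
 * 'my_pcd_callbacks', 'my_pcd' and 'buf' are hypothetical names supplied
 * by the caller.
 */
#if 0
static dwc_otg_cil_callbacks_t my_pcd_callbacks; /* function pointers filled in by the PCD */

static void example_start_in_transfer(dwc_otg_core_if_t *core_if,
                                      void *my_pcd, dwc_ep_t *ep,
                                      uint8_t *buf, uint32_t len)
{
	/* Register the callback table; the CIL stores 'my_pcd' in cb->p and
	 * passes it back when it invokes the callbacks. */
	dwc_otg_cil_register_pcd_callbacks(core_if, &my_pcd_callbacks, my_pcd);

	/* Describe the transfer in the dwc_ep_t; the CIL then programs
	 * DIEPTSIZ/DIEPCTL (and DIEPDMA when internal DMA is enabled). */
	ep->is_in = 1;
	ep->start_xfer_buff = buf;
	ep->xfer_buff = buf;
	ep->xfer_len = len;
	ep->xfer_count = 0;
	dwc_otg_ep_start_transfer(core_if, ep);
}
#endif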