/******************************************************************************
**
** FILE NAME    : amazon_s_dma_core.c
** PROJECT      : Danube
** MODULES      : Central DMA
**
** DATE         : 26 SEP 2005
** AUTHOR       : Wu Qi Ming
** DESCRIPTION  : Central DMA Driver
** COPYRIGHT    : Copyright (c) 2006
**                Infineon Technologies AG
**                Am Campeon 1-12, 85579 Neubiberg, Germany
**
**    This program is free software; you can redistribute it and/or modify
**    it under the terms of the GNU General Public License as published by
**    the Free Software Foundation; either version 2 of the License, or
**    (at your option) any later version.
**
** HISTORY
** $Date        $Author        $Comment
** 26 SEP 2005  Wu Qi Ming     Initial version.
** 25 OCT 2006  Xu Liang       Add GPL header.
** 10 NOV 2006  TC Chen        Change the descriptor length.
** 20 DEC 2006  TC Chen        Fix cache sync issue.
**  8 JAN 2007  Xu Liang       Declare g_dma_in_process and g_dma_int_status
**                             as volatile objects and fix a problem caused
**                             by compiler optimization.
** 23 JAN 2007  Xu Liang       In function "rx_chan_intr_handler", a race
**                             condition exists between clearing the hardware
**                             interrupt status and clearing g_dma_int_status;
**                             to fix it, check whether the descriptor is
**                             valid after clearing the interrupt status.
** 14 AUG 2007  Xu Liang       Add NAPI support.
** 18 JUL 2008  Lei Chuanhua   Remove enable_irq/disable_irq, which caused
**                             interrupts to fire again in the tasklet on the
**                             same DMA channel. Use the DMA module interrupt
**                             mask to prevent interrupt interference during
**                             the tasklet.
** 28 NOV 2008  Lei Chuanhua   Add function to clear the SPI interrupt bit.
*******************************************************************************/
#include <linux/autoconf.h>     /* retrieve the CONFIG_* macros */
#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
#define MODVERSIONS
#endif
#if defined(MODVERSIONS) && !defined(__GENKSYMS__)
//#include <linux/modversions.h>
#endif
#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB           /* need this one 'cause we export symbols */
#endif

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/amazon_s/amazon_s.h>
#include <asm/amazon_s/amazon_s_dma.h>
#include <asm/amazon_s/irq.h>

#define ENABLE_NAPI             1

#if defined(CONFIG_IFX_PPA) || defined(CONFIG_IFX_PPA_MODULE)
  #define ENABLE_IFX_PPA        1
#else
  #define ENABLE_IFX_PPA        0
#endif

/******************* definitions for chip dependent macros *******************/
#define DMA_CTRL        AMAZON_S_DMA_CTRL
#define DMA_CPOLL       AMAZON_S_DMA_CPOLL
#define DMA_CS          AMAZON_S_DMA_CS(0)
#define DMA_CCTRL       AMAZON_S_DMA_CCTRL(0)
#define DMA_CDBA        AMAZON_S_DMA_CDBA(0)
#define DMA_CDLEN       AMAZON_S_DMA_CDLEN(0)
#define DMA_CIS         AMAZON_S_DMA_CIS(0)
#define DMA_CIE         AMAZON_S_DMA_CIE(0)
#define DMA_PS          AMAZON_S_DMA_PS(0)
#define DMA_PCTRL       AMAZON_S_DMA_PCTRL(0)
#define DMA_IRNEN       AMAZON_S_DMA_IRNEN
#define DMA_CH0_INT     AMAZON_S_DMA_CH0_INT
#define DMA_CH1_INT     AMAZON_S_DMA_CH1_INT
#define DMA_CH2_INT     AMAZON_S_DMA_CH2_INT
#define DMA_CH3_INT     AMAZON_S_DMA_CH3_INT
#define DMA_CH4_INT     AMAZON_S_DMA_CH4_INT
#define DMA_CH5_INT     AMAZON_S_DMA_CH5_INT
#define DMA_CH6_INT     AMAZON_S_DMA_CH6_INT
#define DMA_CH7_INT     AMAZON_S_DMA_CH7_INT
#define DMA_CH8_INT     AMAZON_S_DMA_CH8_INT
#define DMA_CH9_INT     AMAZON_S_DMA_CH9_INT
#define DMA_CH10_INT    AMAZON_S_DMA_CH10_INT
#define DMA_CH11_INT    AMAZON_S_DMA_CH11_INT
#define DMA_CH12_INT    AMAZON_S_DMA_CH12_INT
#define DMA_CH13_INT    AMAZON_S_DMA_CH13_INT
#define DMA_CH14_INT    AMAZON_S_DMA_CH14_INT
#define DMA_CH15_INT    AMAZON_S_DMA_CH15_INT
#define DMA_CH16_INT    AMAZON_S_DMA_CH16_INT
#define DMA_CH17_INT    AMAZON_S_DMA_CH17_INT
#define DMA_CH18_INT    AMAZON_S_DMA_CH18_INT
#define DMA_CH19_INT    AMAZON_S_DMA_CH19_INT
/***************** definitions for the macros used in dma-core.c *************/
#undef  USE_TX_INT

#define IFX_SUCCESS     1
#define IFX_ERROR       0

#define IFX_DMA_EMSG(fmt, args...)  printk(KERN_ERR  "[%s %d]: " fmt, __func__, __LINE__, ##args)
#ifdef DMA_DEBUG
#define IFX_DMA_DMSG(fmt, args...)  printk(KERN_INFO "[%s %d]: " fmt, __func__, __LINE__, ##args)
#else
#define IFX_DMA_DMSG(fmt, args...)  do {} while(0)
#endif

#define MAX_DMA_DEVICE_NUM  6   /*max ports connecting to dma*/
#define MAX_DMA_CHANNEL_NUM 20  /*max dma channels*/
#define DMA_INT_BUDGET      IFX_DMA_DESCRIPTOR_OFFSET
#define DMA_POLL_COUNTER    4   /*fix me, set the correct counter value here!*/

/****************** Global variables ******************************************/
struct proc_dir_entry* g_dma_dir;
u64* g_desc_list;
static u64* g_desc_list_backup = NULL;
_dma_device_info dma_devs[MAX_DMA_DEVICE_NUM];
_dma_channel_info dma_chan[MAX_DMA_CHANNEL_NUM];
char global_device_name[MAX_DMA_DEVICE_NUM][20] =
    {{"PPE"},{"DEU"},{"SPI"},{"SDIO"},{"MCTRL0"},{"MCTRL1"}};
_dma_chan_map default_dma_map[MAX_DMA_CHANNEL_NUM] = {
    {"PPE",    IFX_DMA_RX, 0, DMA_CH0_INT,  0},
    {"PPE",    IFX_DMA_TX, 0, DMA_CH1_INT,  0},
    {"PPE",    IFX_DMA_RX, 1, DMA_CH2_INT,  1},
    {"PPE",    IFX_DMA_TX, 1, DMA_CH3_INT,  1},
    {"PPE",    IFX_DMA_RX, 2, DMA_CH4_INT,  2},
    {"PPE",    IFX_DMA_TX, 2, DMA_CH5_INT,  2},
    {"PPE",    IFX_DMA_RX, 3, DMA_CH6_INT,  3},
    {"PPE",    IFX_DMA_TX, 3, DMA_CH7_INT,  3},
    {"DEU",    IFX_DMA_RX, 0, DMA_CH8_INT,  0},
    {"DEU",    IFX_DMA_TX, 0, DMA_CH9_INT,  0},
    {"DEU",    IFX_DMA_RX, 1, DMA_CH10_INT, 1},
    {"DEU",    IFX_DMA_TX, 1, DMA_CH11_INT, 1},
    {"SPI",    IFX_DMA_RX, 0, DMA_CH12_INT, 0},
    {"SPI",    IFX_DMA_TX, 0, DMA_CH13_INT, 0},
    {"SDIO",   IFX_DMA_RX, 0, DMA_CH14_INT, 0},
    {"SDIO",   IFX_DMA_TX, 0, DMA_CH15_INT, 0},
    {"MCTRL0", IFX_DMA_RX, 0, DMA_CH16_INT, 0},
    {"MCTRL0", IFX_DMA_TX, 0, DMA_CH17_INT, 0},
    {"MCTRL1", IFX_DMA_RX, 1, DMA_CH18_INT, 1},
    {"MCTRL1", IFX_DMA_TX, 1, DMA_CH19_INT, 1}};
_dma_chan_map* chan_map = default_dma_map;
volatile u32 g_dma_int_status = 0;
volatile int g_dma_in_process = 0; /*0=not in process, 1=in process*/
/******************************************************************************/
#if defined(ENABLE_NAPI) && ENABLE_NAPI
volatile u32 g_dma_poll_int_status = 0;
#endif

/*********** function declarations ********************************************/
void do_dma_tasklet(unsigned long);
DECLARE_TASKLET(dma_tasklet, do_dma_tasklet, 0);
irqreturn_t dma_interrupt(int irq, void* dev_id);
/******************************************************************************/

int select_chan(int chan_num)
{
    *DMA_CS = chan_num;
    return IFX_SUCCESS;
}

int enable_chan(int chan_num)
{
    *DMA_CS = chan_num;
    *DMA_CCTRL |= 1;
    return IFX_SUCCESS;
}

int disable_chan(int chan_num)
{
    *DMA_CS = chan_num;
    *DMA_CCTRL &= ~1;
    return IFX_SUCCESS;
}

/*default buffer allocator: plain kmalloc, no extra byte offset*/
u8* common_buffer_alloc(int len, int* byte_offset, void** opt)
{
    u8* buffer = (u8*)kmalloc(len * sizeof(u8), GFP_ATOMIC);

    *byte_offset = 0;
    return buffer;
}

int common_buffer_free(u8* dataptr, void* opt)
{
    if ( dataptr )
        kfree(dataptr);
    return 0;
}

int enable_ch_irq(_dma_channel_info* pCh)
{
    int chan_no = (int)(pCh - dma_chan);
    unsigned long flag;

    local_irq_save(flag);
    *DMA_CS = chan_no;
    *DMA_CIE = 0x4a;
    *DMA_IRNEN |= 1 << chan_no;
    local_irq_restore(flag);
    return IFX_SUCCESS;
}

int disable_ch_irq(_dma_channel_info* pCh)
{
    unsigned long flag;
    int chan_no = (int)(pCh - dma_chan);

    local_irq_save(flag);
    g_dma_int_status &= ~(1 << chan_no);
    *DMA_CS = chan_no;
    *DMA_CIE = 0;
    *DMA_IRNEN &= ~(1 << chan_no);
    local_irq_restore(flag);
    return IFX_SUCCESS;
}

int open_chan(_dma_channel_info* pCh)
{
    unsigned long flag;
    int chan_num = (int)(pCh - dma_chan);

    local_irq_save(flag);
    *DMA_CS = chan_num;
    *DMA_CCTRL |= 1;
    if ( pCh->dir == IFX_DMA_RX )
        enable_ch_irq(pCh);
    local_irq_restore(flag);
    return IFX_SUCCESS;
}

int close_chan(_dma_channel_info* pCh)
{
    unsigned long flag;
    int chan_num = (int)(pCh - dma_chan);

    local_irq_save(flag);
    *DMA_CS = chan_num;
    *DMA_CCTRL &= ~1;
    disable_ch_irq(pCh);
    local_irq_restore(flag);
    return IFX_SUCCESS;
}
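/*
 * Example (not part of the driver): a DMA client may replace the default
 * kmalloc-based callbacks above with its own allocator, e.g. one backed by
 * skbuffs for a network driver. This is a minimal sketch that only assumes
 * the callback signatures used in this file; the function names are
 * hypothetical and <linux/skbuff.h> would be required.
 */
#if 0
static u8* example_buffer_alloc(int len, int* byte_offset, void** opt)
{
    struct sk_buff* skb = dev_alloc_skb(len);

    if ( skb == NULL )
        return NULL;
    *byte_offset = 0;       /* buffer already suitably aligned in this sketch */
    *opt = (void*)skb;      /* remember the skb so the free callback can release it */
    return skb->data;
}

static int example_buffer_free(u8* dataptr, void* opt)
{
    if ( opt )
        dev_kfree_skb_any((struct sk_buff*)opt);
    return 0;
}
#endif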
int reset_chan(_dma_channel_info* pCh)
{
    int chan_num = (int)(pCh - dma_chan);

    *DMA_CS = chan_num;
    *DMA_CCTRL |= 2;
    return IFX_SUCCESS;
}

void rx_chan_intr_handler(int chan_no)
{
    _dma_device_info* pDev = (_dma_device_info*)dma_chan[chan_no].dma_dev;
    _dma_channel_info* pCh = &dma_chan[chan_no];
    struct rx_desc* rx_desc_p;
    int tmp;
    unsigned long flag;

    /*handle command complete interrupt*/
    rx_desc_p = (struct rx_desc*)pCh->desc_base + pCh->curr_desc;
    if ( rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C )
    {
        /*everything is correct, inform the upper layer*/
        pDev->current_rx_chan = pCh->rel_chan_no;
        if ( pDev->intr_handler )
            pDev->intr_handler(pDev, RCV_INT);
        pCh->weight--;
    }
    else
    {
        local_irq_save(flag);
        tmp = *DMA_CS;
        *DMA_CS = chan_no;
        *DMA_CIS |= 0x7e;
        *DMA_CS = tmp;
        /*check the descriptor only after the interrupt status is cleared,
          to close the race window between the two clear operations*/
        if ( rx_desc_p->status.field.OWN != CPU_OWN )
            g_dma_int_status &= ~(1 << chan_no);
        local_irq_restore(flag);
    }
}

void tx_chan_intr_handler(int chan_no)
{
    _dma_device_info* pDev = (_dma_device_info*)dma_chan[chan_no].dma_dev;
    _dma_channel_info* pCh = &dma_chan[chan_no];
    int tmp;
    unsigned long flag;

    local_irq_save(flag);
    tmp = *DMA_CS;
    *DMA_CS = chan_no;
    *DMA_CIS |= 0x7e;
    g_dma_int_status &= ~(1 << chan_no);
    *DMA_CS = tmp;
    local_irq_restore(flag);
    pDev->current_tx_chan = pCh->rel_chan_no;
    if ( pDev->intr_handler )
        pDev->intr_handler(pDev, TRANSMIT_CPT_INT);
}

void do_dma_tasklet(unsigned long unused)
{
    int i;
    int chan_no = 0;
    int budget = DMA_INT_BUDGET;
    int weight = 0;
    unsigned long flag;

    while ( g_dma_int_status )
    {
        if ( budget-- < 0 )
        {
            tasklet_schedule(&dma_tasklet);
            return;
        }
        chan_no = -1;
        weight = 0;
        /*WFQ algorithm to select the channel*/
        for ( i = 0; i < MAX_DMA_CHANNEL_NUM; i++ )
        {
            if ( (g_dma_int_status & (1 << i)) && dma_chan[i].weight > 0 )
            {
                if ( dma_chan[i].weight > weight )
                {
                    chan_no = i;
                    weight = dma_chan[chan_no].weight;
                }
            }
        }
        if ( chan_no >= 0 )
        {
            if ( chan_map[chan_no].dir == IFX_DMA_RX )
                rx_chan_intr_handler(chan_no);
            else
                tx_chan_intr_handler(chan_no);
        }
        else    /*no candidate left, reset the weights of all the channels*/
        {
            for ( i = 0; i < MAX_DMA_CHANNEL_NUM; i++ )
                dma_chan[i].weight = dma_chan[i].default_weight;
        }
    }

    local_irq_save(flag);
    g_dma_in_process = 0;
    if ( g_dma_int_status )
    {
        /*new interrupts arrived while we were clearing the flag, run again*/
        g_dma_in_process = 1;
        tasklet_schedule(&dma_tasklet);
    }
    local_irq_restore(flag);
}

irqreturn_t dma_interrupt(int irq, void* dev_id)
{
    _dma_channel_info* pCh;
    int chan_num;

    pCh = (_dma_channel_info*)dev_id;
    chan_num = (int)(pCh - dma_chan);
    if ( chan_num < 0 || chan_num > 19 )
    {
        printk("dma_interrupt irq=%d chan_num=%d\n", irq, chan_num);
    }

    /*
     * Disable interrupt on this channel, later tasklet will enable it after
     * processing all available packets on this channel.
     */
    *DMA_IRNEN &= ~(1 << chan_num);

    /* Record this channel interrupt for tasklet */
    g_dma_int_status |= (1 << chan_num);

#if defined(ENABLE_NAPI) && ENABLE_NAPI
    if ( pCh->dir == IFX_DMA_RX )
    {
        _dma_device_info* pDev = (_dma_device_info*)pCh->dma_dev;
        if ( pDev->activate_poll )
        {
            /* handled by polling rather than tasklet */
            g_dma_int_status &= ~(1 << chan_num);
            g_dma_poll_int_status |= 1 << chan_num;
            pDev->activate_poll(pDev);
            return IRQ_RETVAL(1);
        }
    }
#endif

    /*if the tasklet is not running yet, invoke it*/
    if ( !g_dma_in_process )
    {
        g_dma_in_process = 1;
        tasklet_schedule(&dma_tasklet);
    }
    return IRQ_HANDLED;
}

_dma_device_info* dma_device_reserve(char* dev_name)
{
    int i;

    for ( i = 0; i < MAX_DMA_DEVICE_NUM; i++ )
    {
        if ( strcmp(dev_name, dma_devs[i].device_name) == 0 )
        {
            if ( dma_devs[i].reserved )
                return NULL;
            dma_devs[i].reserved = 1;
            return &dma_devs[i];
        }
    }
    return NULL;
}

int dma_device_release(_dma_device_info* dev)
{
    dev->reserved = 0;
    return IFX_SUCCESS;
}
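/*
 * Example (not part of the driver): typical bring-up sequence for a DMA
 * client, sketched against the reserve/register API above and below. The
 * handler body, return type details, and the chosen port ("SPI") are
 * assumptions for illustration only.
 */
#if 0
static int example_intr_handler(struct dma_device_info* dma_dev, int status)
{
    switch ( status )
    {
    case RCV_INT:           /* data received: fetch it with dma_device_read() */
        break;
    case TRANSMIT_CPT_INT:  /* transmit completed */
        break;
    case TX_BUF_FULL_INT:   /* no free TX descriptor: stop feeding data */
        break;
    }
    return 0;
}

static int example_setup(void)
{
    _dma_device_info* dma_dev = dma_device_reserve("SPI");

    if ( dma_dev == NULL )
        return -EBUSY;
    dma_dev->intr_handler = example_intr_handler;
    dma_dev->buffer_alloc = common_buffer_alloc;
    dma_dev->buffer_free  = common_buffer_free;
    dma_dev->rx_chan[0]->packet_size = 2048;    /* RX buffer size, illustrative */
    dma_dev->rx_chan[0]->control = IFX_DMA_CH_ON;
    dma_dev->tx_chan[0]->control = IFX_DMA_CH_ON;
    return dma_device_register(dma_dev);
}
#endif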
int dma_device_register(_dma_device_info* dev)
{
    int result = IFX_SUCCESS;
    int i, j;
    int chan_no = 0;
    u8* buffer;
    int byte_offset;
    unsigned long flag;
    _dma_device_info* pDev;
    _dma_channel_info* pCh;
    struct rx_desc* rx_desc_p;
    struct tx_desc* tx_desc_p;

#if 0
    if ( strcmp(dev->device_name, "MCTRL0") == 0 || strcmp(dev->device_name, "MCTRL1") == 0 )
    {
        /*select the port*/
        *DMA_PS = 4;
        /*set port parameters*/
        *DMA_PCTRL |= 1 << 16;  /*flush memcopy*/
    }
#endif

    if ( strcmp(dev->device_name, "PPE") == 0 )     /* switch */
    {
        int rx_burst, tx_burst;

        switch ( dev->rx_burst_len )
        {
        case 2:
        default:
            rx_burst = 1 << 2;
            break;
        case 4:
            rx_burst = 2 << 2;
            break;
        case 8:
            rx_burst = 3 << 2;
            break;
        }
        switch ( dev->tx_burst_len )
        {
        case 2:
        default:
            tx_burst = 1 << 4;
            break;
        case 4:
            tx_burst = 2 << 4;
            break;
        case 8:
            tx_burst = 3 << 4;
            break;
        }
        *DMA_PS = 0;
        *DMA_PCTRL = (*DMA_PCTRL & ~0x3C) | rx_burst | tx_burst;
    }

    for ( i = 0; i < dev->max_tx_chan_num; i++ )
    {
        pCh = dev->tx_chan[i];
        if ( pCh->control == IFX_DMA_CH_ON )
        {
            chan_no = (int)(pCh - dma_chan);
            for ( j = 0; j < pCh->desc_len; j++ )
            {
                tx_desc_p = (struct tx_desc*)pCh->desc_base + j;
                memset(tx_desc_p, 0, sizeof(struct tx_desc));
            }
            local_irq_save(flag);
            *DMA_CS = chan_no;
            /*check if the descriptor base is changed*/
            if ( *DMA_CDBA != (u32)CPHYSADDR(pCh->desc_base) )
                *DMA_CDBA = (u32)CPHYSADDR(pCh->desc_base);
            /*check if the descriptor length is changed*/
            if ( *DMA_CDLEN != pCh->desc_len )
                *DMA_CDLEN = pCh->desc_len;
            *DMA_CCTRL &= ~1;
            *DMA_CCTRL |= 2;
            while ( *DMA_CCTRL & 2 ){}
            //*DMA_CIE=0x0a;
            *DMA_IRNEN |= 1 << chan_no;
            *DMA_CCTRL = 0x30100;   /*reset the channel, enable it later via open_chan()*/
#if defined(ENABLE_IFX_PPA) && ENABLE_IFX_PPA
            if ( strcmp(dev->device_name, "PPE") == 0 && (i == 0 || i == 1) )
                *DMA_CCTRL = 0x01030100 | (i << 9);
#endif
            local_irq_restore(flag);
        }
    }

    for ( i = 0; i < dev->max_rx_chan_num; i++ )
    {
        pCh = dev->rx_chan[i];
        if ( pCh->control == IFX_DMA_CH_ON )
        {
            chan_no = (int)(pCh - dma_chan);
            for ( j = 0; j < pCh->desc_len; j++ )
            {
                rx_desc_p = (struct rx_desc*)pCh->desc_base + j;
                pDev = (_dma_device_info*)(pCh->dma_dev);
                buffer = pDev->buffer_alloc(pCh->packet_size, &byte_offset, (void*)&(pCh->opt[j]));
                if ( !buffer )
                    break;
#ifndef CONFIG_MIPS_UNCACHED
                /*invalidate the cache lines covering the new RX buffer*/
                dma_cache_inv((unsigned long)buffer, pCh->packet_size);
#endif
                rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buffer);
                rx_desc_p->status.word = 0;
                rx_desc_p->status.field.byte_offset = byte_offset;
                rx_desc_p->status.field.OWN = DMA_OWN;
                rx_desc_p->status.field.data_length = pCh->packet_size;
            }
            local_irq_save(flag);
            *DMA_CS = chan_no;
            /*check if the descriptor base is changed*/
            if ( *DMA_CDBA != (u32)CPHYSADDR(pCh->desc_base) )
                *DMA_CDBA = (u32)CPHYSADDR(pCh->desc_base);
            /*check if the descriptor length is changed*/
            if ( *DMA_CDLEN != pCh->desc_len )
                *DMA_CDLEN = pCh->desc_len;
            *DMA_CCTRL &= ~1;
            *DMA_CCTRL |= 2;
            while ( *DMA_CCTRL & 2 ){}
            *DMA_CIE = 0x0A;    /*fix me, should enable all the interrupts here?*/
            *DMA_IRNEN |= 1 << chan_no;
            local_irq_restore(flag);
        }
    }
    return result;
}

int dma_device_unregister(_dma_device_info* dev)
{
    int i, j;
    int result = IFX_SUCCESS;
    int chan_num;
    unsigned long flag;
    _dma_channel_info* pCh;
    struct rx_desc* rx_desc_p;
    struct tx_desc* tx_desc_p;

#if 0
    if ( strcmp(dev->device_name, "MCTRL0") == 0 || strcmp(dev->device_name, "MCTRL1") == 0 )
    {
        /*select the port*/
        *DMA_PS = 4;
        /*set port parameters*/
        *DMA_PCTRL |= 1 << 16;  /*flush memcopy*/
    }
#endif

    for ( i = 0; i < dev->max_tx_chan_num; i++ )
    {
        pCh = dev->tx_chan[i];
        if ( pCh->control == IFX_DMA_CH_ON )
        {
            chan_num = (int)(dev->tx_chan[i] - dma_chan);
            local_irq_save(flag);
            *DMA_CS = chan_num;
            pCh->curr_desc = 0;
            pCh->prev_desc = 0;
            pCh->control = IFX_DMA_CH_OFF;
            *DMA_CIE = 0;   /*fix me, should disable all the interrupts here?*/
            *DMA_IRNEN &= ~(1 << chan_num);
            *DMA_CCTRL &= ~1;
            local_irq_restore(flag);
            for ( j = 0; j < pCh->desc_len; j++ )
            {
                tx_desc_p = (struct tx_desc*)pCh->desc_base + j;
                if ( (tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C)
                     || (tx_desc_p->status.field.OWN == DMA_OWN && tx_desc_p->status.field.data_length > 0) )
                {
                    dev->buffer_free((u8*)__va(tx_desc_p->Data_Pointer), (void*)pCh->opt[j]);
                }
                tx_desc_p->status.field.OWN = CPU_OWN;
                memset(tx_desc_p, 0, sizeof(struct tx_desc));
            }
            /*fix me: should free buffers that were not transferred by dma*/
        }
    }

    for ( i = 0; i < dev->max_rx_chan_num; i++ )
    {
        pCh = dev->rx_chan[i];
        chan_num = (int)(dev->rx_chan[i] - dma_chan);
        local_irq_save(flag);
        g_dma_int_status &= ~(1 << chan_num);
        pCh->curr_desc = 0;
        pCh->prev_desc = 0;
        pCh->control = IFX_DMA_CH_OFF;
        *DMA_CS = chan_num;
        if ( *DMA_CS != chan_num )
            printk(__FILE__ ":%d:%s: *DMA_CS (%d) != chan_num (%d)\n",
                   __LINE__, __FUNCTION__, *DMA_CS, chan_num);
        *DMA_CIE = 0;   /*fix me, should disable all the interrupts here?*/
        *DMA_IRNEN &= ~(1 << chan_num);
        *DMA_CCTRL &= ~1;
        local_irq_restore(flag);
        for ( j = 0; j < pCh->desc_len; j++ )
        {
            rx_desc_p = (struct rx_desc*)pCh->desc_base + j;
            if ( (rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C)
                 || (rx_desc_p->status.field.OWN == DMA_OWN && rx_desc_p->status.field.data_length > 0) )
            {
                dev->buffer_free((u8*)__va(rx_desc_p->Data_Pointer), (void*)pCh->opt[j]);
            }
        }
    }
    return result;
}
/**
 * This function applies to the DMA synchronization case, i.e. when the DMA
 * client driver actively requests data instead of being interrupt/event
 * driven. The basic flow is as follows:
 * 1) The DMA client driver prepares the DMA descriptor.
 * 2) The DMA client driver enables its control function.
 * 3) The client waits for the DMA reception interrupt.
 * In this way there is no preallocated memory and no memory copy: all
 * buffers are prepared by the DMA client driver.
 */
int dma_device_desc_setup(_dma_device_info* dma_dev, char* buf, size_t len)
{
    int byte_offset = 0;
    struct rx_desc* rx_desc_p;
    _dma_channel_info* pCh = dma_dev->rx_chan[dma_dev->current_rx_chan];

    rx_desc_p = (struct rx_desc*)pCh->desc_base + pCh->curr_desc;
#ifndef CONFIG_MIPS_UNCACHED
    dma_cache_inv((unsigned long)buf, len);
#endif
    pCh->opt[pCh->curr_desc] = NULL;
    rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buf);
    rx_desc_p->status.word = (DMA_OWN << 31) | (byte_offset << 23) | len;
    wmb();
    /* Increase descriptor index and process wrap around */
    pCh->curr_desc++;
    if ( pCh->curr_desc == pCh->desc_len )
        pCh->curr_desc = 0;
    return 0;
}
EXPORT_SYMBOL(dma_device_desc_setup);

/**
 * This function also applies to the DMA synchronization case. All buffers
 * are provided by the DMA client, and the client driver has no way to modify
 * the descriptor state. The only option is to clear the interrupt status so
 * that the DMA tasklet does not need to run again and again.
 */
int dma_device_clear_int(_dma_device_info* dma_dev, int dir)
{
    int chan_no;
    _dma_channel_info* pCh;
    unsigned long flag;

    if ( dir == 0 )
        pCh = dma_dev->rx_chan[dma_dev->current_rx_chan];
    else
        pCh = dma_dev->tx_chan[dma_dev->current_tx_chan];
    chan_no = (int)(pCh - dma_chan);
    local_irq_save(flag);
    g_dma_int_status &= ~(1 << chan_no);
    local_irq_restore(flag);
    return 0;
}
EXPORT_SYMBOL(dma_device_clear_int);

int dma_device_read(struct dma_device_info* dma_dev, u8** dataptr, void** opt)
{
    u8* buf;
    int len;
    int byte_offset = 0;
    void* p = NULL;
    _dma_channel_info* pCh = dma_dev->rx_chan[dma_dev->current_rx_chan];
    struct rx_desc* rx_desc_p;

    /*get the rx data first*/
    rx_desc_p = (struct rx_desc*)pCh->desc_base + pCh->curr_desc;
    if ( !(rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C) )
        return 0;
    buf = (u8*)__va(rx_desc_p->Data_Pointer);
    *(u32*)dataptr = (u32)buf;
    len = rx_desc_p->status.field.data_length;
    if ( opt )
        *(int*)opt = (int)pCh->opt[pCh->curr_desc];

    /*replace it with a newly allocated buffer*/
    buf = dma_dev->buffer_alloc(pCh->packet_size, &byte_offset, &p);
    if ( buf )
    {
        pCh->opt[pCh->curr_desc] = p;
#ifndef CONFIG_MIPS_UNCACHED
        /*invalidate the cache lines covering the new buffer*/
        dma_cache_inv((unsigned long)buf, pCh->packet_size);
#endif
        rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buf);
        wmb();
        rx_desc_p->status.word = (DMA_OWN << 31)
                               | (byte_offset << 23)
                               | pCh->packet_size;
    }
    else
    {
        /*allocation failed: recycle the old buffer and report zero length*/
        *(u32*)dataptr = 0;
        if ( opt )
            *(int*)opt = 0;
        len = 0;
        rx_desc_p->status.field.data_length = pCh->packet_size;
        rx_desc_p->status.field.OWN = DMA_OWN;
    }
    /*increase the curr_desc pointer*/
    pCh->curr_desc++;
    if ( pCh->curr_desc == pCh->desc_len )
        pCh->curr_desc = 0;
    /*return the length of the received packet*/
    return len;
}

int dma_device_write(struct dma_device_info* dma_dev, u8* dataptr, int len, void* opt)
{
    u32 tmp, byte_offset;
    _dma_channel_info* pCh;
    int chan_no;
    struct tx_desc* tx_desc_p;
    u32 tcs;
    unsigned long flag;

    pCh = dma_dev->tx_chan[dma_dev->current_tx_chan];
    chan_no = (int)(pCh - (_dma_channel_info*)dma_chan);

    /*reclaim descriptors that the DMA has already completed*/
    tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->prev_desc;
    while ( tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C )
    {
        dma_dev->buffer_free((u8*)__va(tx_desc_p->Data_Pointer), pCh->opt[pCh->prev_desc]);
        memset(tx_desc_p, 0, sizeof(struct tx_desc));
        pCh->prev_desc = (pCh->prev_desc + 1) % (pCh->desc_len);
        tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->prev_desc;
    }
    tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->curr_desc;
    /*check whether this descriptor is available*/
    if ( tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C )
    {
        /*if not, tell the upper layer device*/
        dma_dev->intr_handler(dma_dev, TX_BUF_FULL_INT);
        return 0;
    }
    pCh->opt[pCh->curr_desc] = opt;
    /*byte_offset: offset of the data from a burst-aligned address; the
      descriptor points at the aligned address and the offset is stored in
      the status word, since the start address must be a multiple of the
      burst length*/
    byte_offset = ((u32)CPHYSADDR((u32)dataptr)) % ((dma_dev->tx_burst_len) * 4);
#ifndef CONFIG_MIPS_UNCACHED
    dma_cache_wback((unsigned long)dataptr, len);
#endif  /* CONFIG_MIPS_UNCACHED */
    tx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)dataptr) - byte_offset;
    wmb();
    tx_desc_p->status.word = (DMA_OWN << 31)
                           | DMA_DESC_SOP_SET
                           | DMA_DESC_EOP_SET
                           | (byte_offset << 23)
                           | len;
    pCh->curr_desc++;
    if ( pCh->curr_desc == pCh->desc_len )
        pCh->curr_desc = 0;
    /*check whether the next descriptor is available*/
    tx_desc_p = (struct tx_desc*)pCh->desc_base + pCh->curr_desc;
    if ( tx_desc_p->status.field.OWN == DMA_OWN )
    {
        /*if not, tell the upper layer device*/
        dma_dev->intr_handler(dma_dev, TX_BUF_FULL_INT);
    }
    local_irq_save(flag);
    tcs = *DMA_CS;
    *DMA_CS = chan_no;
    tmp = *DMA_CCTRL;
    *DMA_CS = tcs;
    local_irq_restore(flag);
    if ( !(tmp & 1) )
        pCh->open(pCh);
    return len;
}
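/*
 * Example (not part of the driver): how a client might use the
 * synchronization-mode API above. The buffer, length, and device handle are
 * hypothetical; the flow follows the three steps described in the comment of
 * dma_device_desc_setup().
 */
#if 0
static int example_sync_receive(_dma_device_info* dma_dev, char* buf, size_t len)
{
    /* 1) hand the client-owned buffer to the DMA via an RX descriptor */
    dma_device_desc_setup(dma_dev, buf, len);

    /* 2) kick the client-side control function that starts the transfer
          (client specific, e.g. an SPI command register write) */

    /* 3) the RCV_INT callback signals completion; once the data has been
          consumed, clear the pending RX interrupt status so the tasklet
          does not keep spinning on this channel */
    dma_device_clear_int(dma_dev, 0);
    return 0;
}
#endif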
int dma_device_poll(struct dma_device_info* dma_dev, int work_to_do, int* work_done)
{
#if defined(ENABLE_NAPI) && ENABLE_NAPI
    int ret;
    unsigned long flag;
    int dma_int_status_mask;
    int chan_no = 0;
    _dma_channel_info* pCh;
    struct rx_desc* rx_desc_p;
    int i;
    int tmp;

    dma_int_status_mask = 0;
    for ( i = 0; i < dma_dev->max_rx_chan_num; i++ )
        dma_int_status_mask |= 1 << (dma_dev->rx_chan[i] - dma_chan);

    i = 0;
    while ( (g_dma_poll_int_status & dma_int_status_mask) )
    {
        chan_no = dma_dev->rx_chan[i] - dma_chan;
        if ( (g_dma_poll_int_status & (1 << chan_no)) )
        {
            pCh = &dma_chan[chan_no];
            rx_desc_p = (struct rx_desc*)pCh->desc_base + pCh->curr_desc;
            if ( rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C )
            {
                dma_dev->current_rx_chan = pCh->rel_chan_no;
                if ( dma_dev->intr_handler )
                    dma_dev->intr_handler(dma_dev, RCV_INT);
                if ( ++*work_done >= work_to_do )
                    return 1;
            }
            else
            {
                /*no available descriptor in this channel, clear its interrupt status*/
                local_irq_save(flag);
                tmp = *DMA_CS;
                *DMA_CS = chan_no;
                *DMA_CIS |= 0x7e;
                *DMA_CS = tmp;
                if ( rx_desc_p->status.field.OWN != CPU_OWN )
                    g_dma_poll_int_status &= ~(1 << chan_no);
                local_irq_restore(flag);
            }
        }
        if ( ++i == dma_dev->max_rx_chan_num )
            i = 0;
    }

    local_irq_save(flag);
    if ( (g_dma_poll_int_status & dma_int_status_mask) == 0 )
    {
        if ( dma_dev->inactivate_poll )
            dma_dev->inactivate_poll(dma_dev);
        ret = 0;
    }
    else
        ret = 1;
    local_irq_restore(flag);

    return ret;
#else
    return 0;
#endif
}

int desc_list_proc_read(char* buf, char** start, off_t offset, int count, int* eof, void* data)
{
    int i, j;
    int len = 0;

    len += sprintf(buf + len, "descriptor list:\n");
    for ( i = 0; i < MAX_DMA_CHANNEL_NUM; i++ )
    {
        len += sprintf(buf + len, "channel %d:\n", i);
        for ( j = 0; j < dma_chan[i].desc_len; j++ )
            len += sprintf(buf + len, " %08x\n", *(u32*)(dma_chan[i].desc_base + j * 8));
    }
    return len;
}

int map_dma_chan(_dma_chan_map* map)
{
    int i, j;
    int result;

    for ( i = 0; i < MAX_DMA_DEVICE_NUM; i++ )
        strcpy(dma_devs[i].device_name, global_device_name[i]);
    for ( i = 0; i < MAX_DMA_CHANNEL_NUM; i++ )
    {
        dma_chan[i].irq = map[i].irq;
        dma_chan[i].rel_chan_no = map[i].rel_chan_no;
        result = request_irq(dma_chan[i].irq, dma_interrupt, IRQF_DISABLED,
                             "dma-core", (void*)&dma_chan[i]);
        if ( result )
        {
            IFX_DMA_EMSG("error, cannot get dma_irq!\n");
            return -EFAULT;
        }
    }
    for ( i = 0; i < MAX_DMA_DEVICE_NUM; i++ )
    {
        dma_devs[i].max_rx_chan_num = 0;
        dma_devs[i].max_tx_chan_num = 0;
        for ( j = 0; j < MAX_DMA_CHANNEL_NUM; j++ )
        {
            if ( strcmp(dma_devs[i].device_name, map[j].dev_name) == 0 )
            {
                if ( map[j].dir == IFX_DMA_RX )     /*RX direction*/
                {
                    dma_chan[j].dir = IFX_DMA_RX;
                    dma_devs[i].max_rx_chan_num++;
                    dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1] = &dma_chan[j];
                    dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1]->pri = map[j].pri;
                    dma_chan[j].dma_dev = (void*)&dma_devs[i];
                    /*have to program the class value into the register later, fix me*/
                }
                else if ( map[j].dir == IFX_DMA_TX )    /*TX direction*/
                {
                    dma_chan[j].dir = IFX_DMA_TX;
                    dma_devs[i].max_tx_chan_num++;
                    dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1] = &dma_chan[j];
                    dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1]->pri = map[j].pri;
                    dma_chan[j].dma_dev = (void*)&dma_devs[i];
                }
                else
                    printk("WRONG MAP!\n");
            }
        }
    }
    return IFX_SUCCESS;
}
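/*
 * Example (not part of the driver): how a NAPI-capable network driver might
 * hook into dma_device_poll(). This is a sketch against the pre-2.6.24 NAPI
 * API (netif_rx_schedule/netif_rx_complete, dev->quota); the globals and
 * function names are hypothetical. activate_poll/inactivate_poll are the
 * hooks consumed by dma_interrupt() and dma_device_poll() above.
 */
#if 0
static struct net_device* example_netdev;
static _dma_device_info*  example_dma_dev;

static void example_activate_poll(struct dma_device_info* dma_dev)
{
    /* called from dma_interrupt(): schedule the NAPI poll context */
    netif_rx_schedule(example_netdev);
}

static void example_inactivate_poll(struct dma_device_info* dma_dev)
{
    /* called when all RX channels are drained; nothing further needed here
       because dma_interrupt() redirects RX interrupts into poll mode */
}

static int example_poll(struct net_device* dev, int* budget)
{
    int work_done = 0;
    int ret = dma_device_poll(example_dma_dev, min(*budget, dev->quota), &work_done);

    *budget -= work_done;
    dev->quota -= work_done;
    if ( ret == 0 )
        netif_rx_complete(dev);     /* no more packets pending */
    return ret;
}
#endif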
int dma_chip_init(void)
{
    int i;

    /*enable the DMA hardware in the PMU*/
    DMA_PMU_SETUP(PMU_ENABLE);
    /*reset DMA, necessary?*/
    *DMA_CTRL |= 1;
    *DMA_IRNEN = 0;     /*disable all the interrupts first*/
    for ( i = 0; i < MAX_DMA_CHANNEL_NUM; i++ )
    {
        /*select and reset each channel, then wait until the reset is done*/
        *DMA_CS = i;
        *DMA_CCTRL = 2;
        while ( *DMA_CCTRL & 2 ){}
        *DMA_CIE = 0;
    }
    /*enable polling for all channels*/
    *DMA_CPOLL = DMA_POLL_COUNTER << 4 | 1;
    return IFX_SUCCESS;
}
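/*
 * A minimal sketch of how the pieces above are typically wired together at
 * module init time. The function body and ordering here are assumptions for
 * illustration, not the original init code of this driver.
 */
#if 0
static int __init dma_init(void)
{
    dma_chip_init();                /* reset the controller and all channels */
    map_dma_chan(default_dma_map);  /* request IRQs, bind channels to devices */
    /* ... allocate g_desc_list, assign desc_base/desc_len per channel,
       and create the /proc entries under g_dma_dir ... */
    return 0;
}
module_init(dma_init);
#endif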