/*------------------------------------------------------------------------------------------*\ * Copyright (C) 2006,2007,2008,2009,2010 AVM GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA \*------------------------------------------------------------------------------------------*/ #include #include #include #include #include #include #include #include /*--- #include ---*/ /* FIXME Is this still needed? */ #if defined(CONFIG_MIPS_UR8) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)) #include #else /*--- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)) ---*/ #include #endif /*--- #else ---*/ /*--- #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)) ---*/ #include #endif /*--- #if defined(CONFIG_MIPS_UR8) ---*/ #include #if !defined(CONFIG_NETCHIP_ADM69961) #define CONFIG_NETCHIP_ADM69961 #endif #include #include "cpmac_if.h" #include "cpmac_const.h" #include "cpmac_debug.h" #include "cpphy_const.h" #include "cpphy_types.h" #include "cpphy_cppi.h" #include "cpphy_if.h" #include "cpphy_if_g.h" #include "cpmac_reg.h" /* swap mac addr with [5 - ()] */ #define DA(a, bit) ((unsigned int)(((a)[5-((bit)>>3)] >> ((bit) & 7)) & 1)) #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) /*------------------------------------------------------------------------------------------*\ * according to cpmac manual \*------------------------------------------------------------------------------------------*/ static unsigned int cpphy_cppi_hash_fun(unsigned char *a) { unsigned int hash; hash = DA(a,0) ^ DA(a, 6) ^ DA(a,12) ^ DA(a,18) ^ DA(a,24) ^ DA(a,30) ^ DA(a,36) ^ DA(a,42); hash |= (DA(a,1) ^ DA(a, 7) ^ DA(a,13) ^ DA(a,19) ^ DA(a,25) ^ DA(a,31) ^ DA(a,37) ^ DA(a,43)) << 1; hash |= (DA(a,2) ^ DA(a, 8) ^ DA(a,14) ^ DA(a,20) ^ DA(a,26) ^ DA(a,32) ^ DA(a,38) ^ DA(a,44)) << 2; hash |= (DA(a,3) ^ DA(a, 9) ^ DA(a,15) ^ DA(a,21) ^ DA(a,27) ^ DA(a,33) ^ DA(a,39) ^ DA(a,45)) << 3; hash |= (DA(a,4) ^ DA(a,10) ^ DA(a,16) ^ DA(a,22) ^ DA(a,28) ^ DA(a,34) ^ DA(a,40) ^ DA(a,46)) << 4; hash |= (DA(a,5) ^ DA(a,11) ^ DA(a,17) ^ DA(a,23) ^ DA(a,29) ^ DA(a,35) ^ DA(a,41) ^ DA(a,47)) << 5; return hash; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static void cpphy_cppi_add_hash(cpphy_cppi_t *cppi, unsigned char *MacAddress) { unsigned int HashValue; unsigned int HashBit; HashValue = cpphy_cppi_hash_fun (MacAddress); if(HashValue < 32) { HashBit = (1 << HashValue); cppi->hash1 |= HashBit; } else { HashBit = (1 << (HashValue - 32)); cppi->hash2 |= HashBit; } } #endif /*--- #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) ---*/ /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ void cpphy_cppi_set_multi_promiscous(cpphy_cppi_t *cppi __attribute__ ((unused)), unsigned int multi __attribute__ ((unused)), unsigned int promiscous __attribute__ ((unused))) { # if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) /* Not needed, the FUSIV driver does this */ struct net_device *p_dev = cppi->cpmac_priv->owner; if(cppi->hw_state != CPPHY_HW_ST_OPENED) { DEB_WARN("[%s] illegal state %u\n", __FUNCTION__, cppi->hw_state); } else { int i; # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) unsigned int RxMbpEnable; # endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ cppi->hash1 = 0; cppi->hash2 = 0; if(multi) { if(multi == 1) { cppi->hash1 = 0xffffffff; cppi->hash2 = 0xffffffff; } else { /* list of multicast addresses */ struct dev_mc_list *p_dmi = p_dev->mc_list; for(i = 0; i < p_dev->mc_count; i++, p_dmi = p_dmi->next) { DEB_INFO("[%s] add %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__, p_dmi->dmi_addr[0], p_dmi->dmi_addr[1], p_dmi->dmi_addr[2], p_dmi->dmi_addr[3], p_dmi->dmi_addr[4], p_dmi->dmi_addr[5]); cpphy_cppi_add_hash(cppi, p_dmi->dmi_addr); } } } # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) RxMbpEnable = CPMAC_RX_MBP_ENABLE(p_dev->base_addr); RxMbpEnable &= ~(RX_CAF_EN | RX_MULT_EN); if(multi) RxMbpEnable |= RX_MULT_EN; if(promiscous) RxMbpEnable |= RX_CAF_EN; CPMAC_RX_MBP_ENABLE(p_dev->base_addr) = RxMbpEnable; CPMAC_MACHASH1(p_dev->base_addr) = cppi->hash1; CPMAC_MACHASH2(p_dev->base_addr) = cppi->hash2; DEB_TRC("[%s] MBP_ENABLE 0x%08X\n", __FUNCTION__, CPMAC_RX_MBP_ENABLE(p_dev->base_addr)); # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /* Promiscous mode for channel 0 */ cppi->cpmac_priv->CPGMAC_F->RX_MBP_ENABLE.Bits.rx_caf_en = promiscous ? 1 : 0; cppi->cpmac_priv->CPGMAC_F->RX_MBP_ENABLE.Bits.rx_prom_ch = 0; /* Enable multicast rx for channel 0 */ cppi->cpmac_priv->CPGMAC_F->RX_MBP_ENABLE.Bits.rx_mult_en = multi ? 1 : 0; cppi->cpmac_priv->CPGMAC_F->RX_MBP_ENABLE.Bits.rx_mult_ch = 0; cppi->cpmac_priv->CPGMAC_F->MAC_HASH1 = cppi->hash1; cppi->cpmac_priv->CPGMAC_F->MAC_HASH2 = cppi->hash2; DEB_TRC("[%s] MBP_ENABLE 0x%08X (%s, %s)\n", __FUNCTION__, cppi->cpmac_priv->CPGMAC_F->RX_MBP_ENABLE.Reg, promiscous ? "promiscous" : "not promiscous", multi ? "multicast" : "no multicast"); # else /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ # warning "Multicast and promiscous mode needed for this architecture" # endif /*--- #else ---*/ /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ } # endif /*--- #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) ---*/ } /*----------------------------------------------------------------------------------*\ add counter to stats (may be preinitialized by cpphy_mdio_update_hw_status()) \*----------------------------------------------------------------------------------*/ void cpphy_cppi_update_hw_status(cpphy_cppi_t *cppi __attribute__ ((unused)), struct net_device_stats *stats __attribute__ ((unused))) { # if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) /* Not needed, this is done in the FUSIV driver */ unsigned int rx_crc_errors, rx_frame_errors, rx_length_errors, rx_over_errors, rx_fifo_errors, rx_missed_errors, tx_carrier_errors, tx_fifo_errors, collisions; # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) struct net_device *p_dev = cppi->cpmac_priv->owner; stats->multicast += CPMAC_RXMULTICASTFRAMES(p_dev->base_addr); rx_crc_errors = CPMAC_RXCRCERRORS(p_dev->base_addr); /* received frame alignment error */ rx_frame_errors = CPMAC_RXALIGNCODEERRORS(p_dev->base_addr); rx_length_errors = CPMAC_RXJABBERFRAMES(p_dev->base_addr); rx_length_errors += CPMAC_RXUNDERSIZEDFRAMES(p_dev->base_addr); rx_length_errors += CPMAC_RXFRAGMENTS(p_dev->base_addr); /* receiver ring buffer overflow */ rx_over_errors = CPMAC_RXDMAOVERRUNS(p_dev->base_addr); /* receiver fifo overrun */ rx_fifo_errors = CPMAC_RXSOFOVERRUNS(p_dev->base_addr) + CPMAC_RXMOFOVERRUNS(p_dev->base_addr) - rx_over_errors; /* receiver missed packet: to check */ rx_missed_errors = CPMAC_RXFILTEREDFRAMES(p_dev->base_addr); DEB_TRC("cpphy_cppi_update_hw_status, %u, %u, %u, %u, %u, %u, %u, %u, %u\n", CPMAC_RXCRCERRORS(p_dev->base_addr), CPMAC_RXALIGNCODEERRORS(p_dev->base_addr), CPMAC_RXJABBERFRAMES(p_dev->base_addr), CPMAC_RXUNDERSIZEDFRAMES(p_dev->base_addr), CPMAC_RXFRAGMENTS(p_dev->base_addr), CPMAC_RXDMAOVERRUNS(p_dev->base_addr), CPMAC_RXSOFOVERRUNS(p_dev->base_addr), CPMAC_RXMOFOVERRUNS(p_dev->base_addr), CPMAC_RXFILTEREDFRAMES(p_dev->base_addr)); /* detailed tx_errors */ tx_carrier_errors = CPMAC_TXCARRIERSENSEERRORS(p_dev->base_addr); tx_fifo_errors = CPMAC_TXUNDERRUN(p_dev->base_addr); collisions = CPMAC_TXEXCESSIVECOLLISIONS(p_dev->base_addr) + CPMAC_TXLATECOLLISIONS(p_dev->base_addr); # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ stats->multicast += cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxMulticastFrames; rx_crc_errors = cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxCRCErrors; /* received frame alignment error */ rx_frame_errors = cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxAlignCodeErrors; rx_length_errors = cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxJabberFrames; rx_length_errors += cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxUndersizedFrames; rx_length_errors += cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxOversizedFrames; rx_length_errors += cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxFragments; /* receiver ring buff overflow */ rx_over_errors = cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxDmaOverruns; /* receiver fifo overrun */ rx_fifo_errors = cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxSofOverruns + cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxMofOverruns - rx_over_errors; /* receiver missed packet: to check */ rx_missed_errors = cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxFilteredFrames; DEB_TRC("cpphy_cppi_update_hw_status, %u, %u, %u, %u, %u, %u, %u, %u, %u\n", cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxCRCErrors, cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxAlignCodeErrors, cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxJabberFrames, cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxUndersizedFrames, cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxFragments, cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxDmaOverruns, cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxSofOverruns, cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxMofOverruns, cppi->cpmac_priv->CPGMAC_F->STATISTIC.RxFilteredFrames); /* detailed tx_errors */ tx_carrier_errors = cppi->cpmac_priv->CPGMAC_F->STATISTIC.TxCarrierSenseErrors; tx_fifo_errors = cppi->cpmac_priv->CPGMAC_F->STATISTIC.TxUnderrun; collisions = cppi->cpmac_priv->CPGMAC_F->STATISTIC.TxExcessiveCollisions + cppi->cpmac_priv->CPGMAC_F->STATISTIC.TxLateCollisions; # else /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ # warning "No update_hw_status defined for this architecture" # endif /*--- #else ---*/ /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ stats->rx_crc_errors += rx_crc_errors; stats->rx_frame_errors += rx_frame_errors; stats->rx_length_errors += rx_length_errors; stats->rx_over_errors += rx_over_errors; stats->rx_fifo_errors += rx_fifo_errors; stats->rx_missed_errors += rx_missed_errors; stats->rx_errors += rx_length_errors + rx_crc_errors + rx_frame_errors + rx_missed_errors; stats->rx_dropped += rx_fifo_errors + rx_over_errors; /* detailed tx_errors */ stats->tx_carrier_errors += tx_carrier_errors; stats->tx_fifo_errors += tx_fifo_errors; stats->collisions += collisions; stats->tx_errors += tx_carrier_errors + tx_fifo_errors + collisions; # endif /*--- #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) ---*/ } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ void *cpphy_cppi_malloc_buffer(unsigned int size, unsigned int tot_buf_size, unsigned int tot_reserve_bytes, struct net_device *p_dev, struct sk_buff **skb) { if((*skb = dev_alloc_skb(tot_buf_size))) { (*skb)->dev = p_dev; skb_reserve(*skb, tot_reserve_bytes); return skb_put(*skb, size); } return NULL; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) static void *cpphy_cppi_malloc_buffer_startup(unsigned int size, unsigned int tot_buf_size, unsigned int tot_reserve_bytes, struct net_device *p_dev, struct sk_buff **skb) { if((*skb = alloc_skb(tot_buf_size, GFP_KERNEL))) { (*skb)->dev = p_dev; skb_reserve(*skb, tot_reserve_bytes); return skb_put(*skb, size); } return NULL; } #endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /*----------------------------------------------------------------------------------*\ \*----------------------------------------------------------------------------------*/ void cpphy_cppi_free_rcb(cpphy_cppi_t *cppi) { cpphy_rcb_t *rcb; /* Free Rx data buffers attached to descriptors, if necessary */ rcb = cppi->RxPrevEnqueue; do { if(rcb->skb) { dev_kfree_skb_any((struct sk_buff *) rcb->skb); rcb->skb = 0; } rcb = (cpphy_rcb_t *) rcb->Next; } while(rcb && (rcb != cppi->RxPrevEnqueue)); /* free up all desciptors at once */ kfree(cppi->RcbStart); cppi->RcbStart = 0; } /*----------------------------------------------------------------------------------*\ \*----------------------------------------------------------------------------------*/ void cpphy_cppi_free_tcb(cpphy_cppi_t *cppi) { /* free all descriptors at once */ kfree(cppi->TcbStart); cppi->TcbStart = 0; } /*----------------------------------------------------------------------------------*\ \*----------------------------------------------------------------------------------*/ #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) static cpmac_err_t cpphy_cppi_rx_teardown_int(cpphy_cppi_t *cppi) { unsigned int base = ((struct net_device *)cppi->cpmac_priv->owner)->base_addr; /* check to see if the interrupt is a teardown interrupt */ if(((CPMAC_RX_INT_ACK( base , 0 )) & TEARDOWN_ACK) == TEARDOWN_ACK) { /* finish channel teardown */ /* Free channel resources on a FULL teardown */ if(cppi->RxTeardownPending & CPPHY_FULL_TEARDOWN) { cpphy_cppi_free_rcb(cppi); } /* write completion pointer */ (CPMAC_RX_INT_ACK(base, 0)) = TEARDOWN_ACK; /* no longer pending teardown */ cppi->RxTeardownPending &= ~CPPHY_RX_TEARDOWN; cppi->RxOpen = 0; CPMAC_RX_INTMASK_CLEAR(base) = (1<<0); cppi->RxTeardownPending = 0; return CPMAC_ERR_NOERR; } return CPMAC_ERR_NO_TEARDOWN; } #endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /*----------------------------------------------------------------------------------*\ \*----------------------------------------------------------------------------------*/ #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) static cpmac_err_t cpphy_cppi_tx_teardown_int(cpphy_cppi_t *cppi) { unsigned int base = cppi->cpmac_priv->owner->base_addr; cpphy_tcb_t *Curr; if((CPMAC_TX_INT_ACK (base, cppi->TxChannel) & TEARDOWN_ACK) == TEARDOWN_ACK) { Curr = cppi->TxCurrDequeue; while(Curr->skb) { cpphy_if_tx_complete(cppi, (struct sk_buff *) Curr->skb, CPMAC_ERR_TEARDOWN); Curr->skb = NULL; Curr = (cpphy_tcb_t *) Curr->Next; } cppi->TxCurrDequeue = Curr; /* teardown for close */ if(cppi->TxTeardownPending & CPPHY_FULL_TEARDOWN) { cpphy_cppi_free_tcb(cppi); } /* write completion pointer */ (CPMAC_TX_INT_ACK(base, cppi->TxChannel)) = TEARDOWN_ACK; /* no longer pending teardown */ cppi->TxTeardownPending &= ~CPPHY_TX_TEARDOWN; if(cppi->TxTeardownPending & CPPHY_FULL_TEARDOWN) { cppi->TxOpen = 0; } if(cppi->TxTeardownPending & CPPHY_FULL_TEARDOWN) { CPMAC_TX_INTMASK_CLEAR(base) = (1 << cppi->TxChannel); } cppi->TxDmaActive = 0; # if defined(CPMAC_TX_TIMEOUT) && (CPMAC_TX_TIMEOUT > 0) if(cppi->TxTeardownPending & CPPHY_CALLBACK_TEARDOWN) { cppi->TxTeardownPending = 0; cpmac_if_teardown_complete(cppi->cpmac_priv); } else { # endif /*--- #if defined(CPMAC_TX_TIMEOUT) && (CPMAC_TX_TIMEOUT > 0) ---*/ cppi->TxTeardownPending = 0; # if defined(CPMAC_TX_TIMEOUT) && (CPMAC_TX_TIMEOUT > 0) } # endif /*--- #if defined(CPMAC_TX_TIMEOUT) && (CPMAC_TX_TIMEOUT > 0) ---*/ return CPMAC_ERR_NOERR; } return CPMAC_ERR_NO_TEARDOWN; } #endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) cpmac_err_t cpphy_cppi_rx_return(cpphy_cppi_t *cppi, struct sk_buff *skb) { char *pBuf; cpmac_err_t ret = CPMAC_ERR_NOERR; cpphy_rcb_t *rcb, *rcb_last; int base = ((struct net_device *) cppi->cpmac_priv->owner)->base_addr; if(cppi->hw_state != CPPHY_HW_ST_OPENED) { return CPMAC_ERR_CHAN_NOT_OPEN; } rcb_last = cppi->RxPrevEnqueue; rcb = rcb_last->Next; if(skb) { rcb->skb = skb; rcb->BufPtr = CPPHY_VIRT_TO_PHYS(skb->data); } else { pBuf = (char *) cpphy_cppi_malloc_buffer(CPPHY_MAX_RX_BUFFER_SIZE, CPPHY_TOTAL_RX_BUFFER_SIZE, CPPHY_TOTAL_RX_RESERVED, cppi->cpmac_priv->owner, &rcb->skb); if(pBuf) { /* malloc succeeded, requeue the RCB to the hardware */ rcb->BufPtr = CPPHY_VIRT_TO_PHYS(pBuf); } else { /* malloc failed, add this RCB to Needs Buffer List */ cppi->CurrNeedCount++; ret = CPMAC_ERR_NEED_BUFFER; } } if(ret == CPMAC_ERR_NOERR) { unsigned int dma_running; rcb->HNext = 0; rcb->Off_BLen = CPPHY_MAX_RX_BUFFER_SIZE; rcb->mode = CB_OWNERSHIP_BIT; /*----------------------------------------------------------------------------------*\ * If the ownership bit of the last rcb is set, DMA is still running. * \*----------------------------------------------------------------------------------*/ dma_running = rcb_last->mode & CB_OWNERSHIP_BIT ? 1 : 0; /*----------------------------------------------------------------------------------*\ * Add the rcb to the existing list after the last rcb * * Attention: This may destroy the ownership bit status, if the DMA controller * * changed it in nearly the same moment! \*----------------------------------------------------------------------------------*/ rcb_last->HNext = CPPHY_VIRT_TO_PHYS(rcb); cppi->RxPrevEnqueue = rcb; /* Add one free buffer to FreeBuffer count of CPMAC */ CPMAC_RX_FREEBUFFER(base, 0) = 1; if(dma_running) { /* We think DMA is still running */ /* Check the Rx DMA list head to see if we are really still running */ if((CPMAC_RX_HDP(base, 0)) != 0) { /* The rcb is chained and DMA is still running. Everything okay. */ return ret; } /* DMA stopped. Did it complete the current rcb as well? */ if(!(rcb->mode & CB_OWNERSHIP_BIT)) { /* Current rcb is used as well. Correct the possibly wrong mode of the *\ * last packet to allow the GC to work correctly. Transmitting is * \* finished after that. */ rcb_last->mode = CB_SOF_BIT | CB_EOF_BIT; /*--- cpphy_if_gc_tx_queue(cppi); ---*/ return ret; } } /* At this point DMA is not running. Set it up again with rcb as the start */ (CPMAC_RX_HDP(base, 0)) = CPPHY_VIRT_TO_PHYS(rcb); } return ret; } #endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /*----------------------------------------------------------------------------------*\ \*----------------------------------------------------------------------------------*/ #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) void cpphy_cppi_needs_check(cpphy_cppi_t *cppi) { cpmac_err_t ret; while(cppi->CurrNeedCount) { ret = cpphy_cppi_rx_return(cppi, NULL); if(ret == CPMAC_ERR_NOERR) { cppi->CurrNeedCount--; } else { if(ret == CPMAC_ERR_NEED_BUFFER) { /* revert the increment in cpphy_cppi_rx_return () */ cppi->CurrNeedCount--; } break; } } } #endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /*----------------------------------------------------------------------------------*\ main handler of receive interrupts \*----------------------------------------------------------------------------------*/ #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) cpmac_err_t cpphy_cppi_rx_int (cpphy_cppi_t *cppi) { cpphy_rcb_t *rcb, *rcb_last, *rcb_ack; unsigned int mode; int base = ((struct net_device *)cppi->cpmac_priv->owner)->base_addr; if(cppi->CurrNeedCount) { cpphy_cppi_needs_check (cppi); } /* Handle teardown interrupt */ if(cppi->RxTeardownPending) { if(cpphy_cppi_rx_teardown_int (cppi) == CPMAC_ERR_NOERR) { return CPMAC_ERR_NOERR; } } rcb = cppi->RxCurrDequeue; rcb_ack = (cpphy_rcb_t *)CPPHY_PHYS_TO_VIRT_NO_CACHE(CPMAC_RX_INT_ACK (base , 0)); mode = *((volatile unsigned int *) CPPHY_VIRT_TO_VIRT_NO_CACHE(&rcb_ack->mode)); if(!(mode & CB_OWNERSHIP_BIT)) { struct sk_buff *skb; /* Write the completion pointer */ (CPMAC_RX_INT_ACK (base, 0)) = CPPHY_VIRT_TO_PHYS(rcb_ack); do { skb = (struct sk_buff *)rcb->skb; rcb->skb = NULL; mode = *((volatile unsigned int *) CPPHY_VIRT_TO_VIRT_NO_CACHE(&rcb->mode)); if( (cppi->CurrNeedCount <= cppi->MaxNeedCount) && (mode & (CB_SOF_BIT | CB_EOF_BIT)) == (CB_SOF_BIT | CB_EOF_BIT)) { cpmac_if_data_from_phy (cppi->cpmac_priv, skb, mode & CB_SIZE_MASK); skb = NULL; } else { /*--- DEB_WARN("cpphy_cppi_rx_int, ignore pkt with ill flags: %#x\n", mode); ---*/ DEB_INFO("cpphy_cppi_rx_int, ignore pkt with ill flags: %#x\n", mode); cppi->cpmac_priv->local_stats_rx_errors++; } cpphy_cppi_rx_return (cppi, skb); rcb_last = rcb; rcb = rcb->Next; } while (rcb_last != rcb_ack); cppi->RxCurrDequeue = rcb; } else { /* should never happen: rx int while no buffer received */ DEB_ERR("cpphy_cppi_rx_int, ill rcb_ack: %p (%p,%p)\n", rcb_ack, cppi->RxFirst, cppi->RxLast); } return CPMAC_ERR_NOERR; } #endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /*----------------------------------------------------------------------------------*\ main handler of transmit complete interrupts \*----------------------------------------------------------------------------------*/ #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) int cpphy_cppi_tx_int(cpphy_cppi_t *cppi) { cpphy_tcb_t *tcb, *tcb_ack, *tcb_last; unsigned int mode; unsigned int base = cppi->cpmac_priv->owner->base_addr; /* Handle teardown interrupt */ if(cppi->TxTeardownPending) { if(cpphy_cppi_tx_teardown_int(cppi) == CPMAC_ERR_NOERR) { return CPMAC_ERR_NOERR; } } tcb = cppi->TxCurrDequeue; tcb_ack = (cpphy_tcb_t *)CPPHY_PHYS_TO_VIRT_NO_CACHE(CPMAC_TX_INT_ACK(base, cppi->TxChannel)); mode = *((volatile unsigned int *) CPPHY_VIRT_TO_VIRT_NO_CACHE(&tcb_ack->mode)); if(!(mode & CB_OWNERSHIP_BIT)) { /* Write the completion pointer */ (CPMAC_TX_INT_ACK(base, cppi->TxChannel)) = CPPHY_VIRT_TO_PHYS(tcb_ack); /* if there is an error trace concerning locking: start here */ if(*((volatile unsigned int *) &tcb_ack->HNext)) { /* check possible race condition with dma */ if(mode & CB_EOQ_BIT) { /* restart dma if halted */ (CPMAC_TX_HDP(base, cppi->TxChannel)) = tcb_ack->HNext; } } else { cppi->TxDmaActive = 0; /* signal cpphy_if_data_to_phy() to restart dma */ if(*((volatile unsigned int *) &tcb_ack->HNext)) { /* this hopefully never happens, because of higher prio of tasklet */ DEB_ERR("cpphy_cppi_tx_int, need locking with cpphy_if_data_to_phy()\n"); } } /* if there is an error trace concerning locking: end here */ do { cpphy_if_tx_complete(cppi, (struct sk_buff *) tcb->skb, CPMAC_ERR_NOERR); tcb->skb = NULL; /* tcb may be reused immediately */ tcb_last = tcb; tcb = (cpphy_tcb_t *) tcb->Next; } while(tcb_last != tcb_ack); cppi->TxCurrDequeue = tcb; { struct net_device *p_dev = cppi->cpmac_priv->owner; struct Qdisc *q = p_dev->qdisc; if(netif_queue_stopped(p_dev)) { DEB_DEBUG("[%s] wake queue for %s\n", __FUNCTION__, p_dev->name); netif_wake_queue(p_dev); } else if(q->q.qlen) { netif_schedule(p_dev); } } } else { /* should never happen: tx int while no buffer completed */ DEB_ERR("[%s] ill tcb_ack: %p (%p,%p)\n", __FUNCTION__, tcb_ack, cppi->TxFirst, cppi->TxLast); } return CPMAC_ERR_NOERR; } #endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /*----------------------------------------------------------------------------------*\ Transmit buffer descriptor allocation \*----------------------------------------------------------------------------------*/ #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) static cpmac_err_t cpphy_cppi_init_tcb(cpphy_cppi_t *cppi) { cpmac_err_t ret = CPMAC_ERR_NOERR; unsigned int i; unsigned int Num = CPPHY_MAX_TX_BUFFERS; cpphy_tcb_t *pTcb = NULL; char *AllTcb; int tcbSize, size_malloc; tcbSize = (sizeof (cpphy_tcb_t) + 0x3f) & ~0x3f; size_malloc = (tcbSize * Num) + 0x3f; /* if the memory has already been allocated, simply reuse it! */ if(!(AllTcb = cppi->TcbStart)) { /* malloc all TCBs at once */ if(!(AllTcb = (char *) kmalloc (size_malloc, GFP_KERNEL))) { ret = CPMAC_ERR_NOMEM; } else { dma_cache_wback_inv((unsigned long) AllTcb, size_malloc); /* keep this address for freeing later */ cppi->TcbStart = AllTcb; AllTcb = (char *) CPPHY_VIRT_TO_VIRT_NO_CACHE(AllTcb); memset(AllTcb, 0, size_malloc); } } if(AllTcb) { /* align to cache line */ AllTcb = (char *) (((unsigned int) AllTcb + 0x3f) & ~0x3f); if(Num <= cppi->NeededDMAtcbs) { DEB_WARN("[%s] Need more DMA tcbs (%u) than are available (%u)!\n", __FUNCTION__, cppi->NeededDMAtcbs, Num); return CPMAC_ERR_NOMEM; } cppi->TxFirst = (cpphy_tcb_t *) AllTcb; /* FIXME Is this correct? This is not the malloced address! */ cppi->TxDmaActive = 0; atomic_set(&cppi->dma_send_running, 0); atomic_set(&cppi->dequeue_running, 0); # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) /* design: descriptors as logical ring buffer (with HW next ptr NULL terminated) */ /* TxPrevEnqueue: previously enqueued tx skb (dma active) */ /* TxCurrDequeue: next tx skb to be send completed */ cppi->TxCurrDequeue = (cpphy_tcb_t *) cppi->TxFirst; cppi->TxPrevEnqueue = (cpphy_tcb_t *) (AllTcb + ((Num - 1) * tcbSize)); cppi->TxLast = cppi->TxPrevEnqueue; /* First build the ring buffer for DMA */ pTcb = (cpphy_tcb_t *) cppi->TxFirst; for(i = 0; i < cppi->NeededDMAtcbs; i++) { pTcb->mode = CB_EOQ_BIT; /* design: start DMA initially */ pTcb->Next = (cpphy_tcb_t *) (((unsigned char *) pTcb) + tcbSize); pTcb = (cpphy_tcb_t *) pTcb->Next; } cppi->TxPrevEnqueue = pTcb; cppi->TxPrevEnqueue->Next = cppi->TxFirst; /* Now enqueue the preallocated tcbs for normal usage */ pTcb = (cpphy_tcb_t *) (((unsigned char *) pTcb) + tcbSize); cppi->TxFirstFree = pTcb; cppi->TxLastFree = cppi->TxFirstFree; for(i = 0; i < Num - cppi->NeededDMAtcbs - 1; i++) { pTcb = (cpphy_tcb_t *)(((unsigned char *) pTcb) + tcbSize); cpphy_if_free_tcb(cppi, pTcb); } # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ cppi->TxFirstFree = (volatile cpphy_tcb_t *) AllTcb; cppi->TxLastFree = cppi->TxFirstFree; pTcb = (cpphy_tcb_t *) cppi->TxFirstFree; for(i = 0; i < Num - 1; i++) { pTcb = (cpphy_tcb_t *)(((unsigned char *) pTcb) + tcbSize); cpphy_if_free_tcb(cppi, pTcb); } # else /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ # warning "Missing init_tcb for this architecture!" # endif /*--- #else ---*/ /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ } cppi->support.tcbs_freed = 0; return ret; } #endif /*--- #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) ---*/ /*------------------------------------------------------------------------------------------*\ * Allocation of receive buffer descriptors and data buffers \*------------------------------------------------------------------------------------------*/ #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) static cpmac_err_t cpphy_cppi_init_rcb(cpphy_cppi_t *cppi, int Num, int Size) { int i; cpphy_rcb_t *pRcb, *Next; char *pBuf; char *AllRcb; int rcbSize; int size_malloc; cpmac_err_t ret = CPMAC_ERR_NOERR; unsigned int base = cppi->cpmac_priv->owner->base_addr; /* Align on 16 byte boundary */ rcbSize = (sizeof(cpphy_rcb_t) + 0xf) &~ 0xf; size_malloc = (rcbSize * Num) + 0xf; if(!(AllRcb = (char *) kmalloc(size_malloc, GFP_KERNEL))) { DEB_ERR("[%s] Could not allocate memory\n", __FUNCTION__); ret = CPMAC_ERR_NOMEM; } else { dma_cache_wback_inv(AllRcb, size_malloc); /* keep this address for freeing later */ cppi->RcbStart = AllRcb; AllRcb = CPPHY_VIRT_TO_VIRT_NO_CACHE(AllRcb); memset(AllRcb, 0, size_malloc); /* align to cache line */ AllRcb = (char *) (((unsigned int) AllRcb + 0xf) & ~0xf); cppi->MaxNeedCount = Num - 1; /* need one complete buff to still get rx int */ cppi->RxCurrDequeue = (cpphy_rcb_t *) AllRcb; cppi->RxFirst = cppi->RxCurrDequeue; cppi->RxPrevEnqueue = (cpphy_rcb_t *) (AllRcb + ((Num - 1) *rcbSize)); cppi->RxLast = cppi->RxPrevEnqueue; /* design: descriptors as logical ring buffer (with HW next ptr ring NULL terminated) */ Next = cppi->RxCurrDequeue; pRcb = cppi->RxPrevEnqueue; for(i = 0; i < Num; i++) { pRcb->Next = Next; pRcb->HNext = i ? CPPHY_VIRT_TO_PHYS(Next) : 0; pRcb->Off_BLen = CPPHY_MAX_RX_BUFFER_SIZE; pRcb->mode = CB_OWNERSHIP_BIT; pBuf = (char *) cpphy_cppi_malloc_buffer_startup(Size, CPPHY_TOTAL_RX_BUFFER_SIZE, CPPHY_TOTAL_RX_RESERVED, cppi->cpmac_priv->owner, &pRcb->skb); if(!pBuf) { cpphy_cppi_free_rcb(cppi); DEB_ERR("[%s] No buffer\n", __FUNCTION__); return CPMAC_ERR_NO_BUFFER; } pRcb->BufPtr = CPPHY_VIRT_TO_PHYS(pBuf); Next = pRcb; pRcb = (cpphy_rcb_t *) (((unsigned char *) pRcb) - rcbSize); } /* Set flow control parameters of CPMAC */ /* To set freebuffer count to zero we have to create an overflow of the incremental register */ CPMAC_RX_FREEBUFFER(base, 0) = 0x10000 - CPMAC_RX_FREEBUFFER(base, 0); CPMAC_RX_FREEBUFFER(base, 0) = Num - 1; CPMAC_RX_FLOWTHRESH(base, 0) = 5; /* rx buffer chain to dma */ (CPMAC_RX_HDP(cppi->cpmac_priv->owner->base_addr, 0)) = CPPHY_VIRT_TO_PHYS(cppi->RxFirst); } return ret; } #endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ /*----------------------------------------------------------------------------------*\ \*----------------------------------------------------------------------------------*/ cpmac_err_t cpphy_cppi_start_dma(cpphy_cppi_t *cppi __attribute__ ((unused))) { cpmac_err_t ret = CPMAC_ERR_NOERR; # if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) /* Not needed for this architecture */ # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) struct net_device *p_dev = (struct net_device *) cppi->cpmac_priv->owner; # endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ DEB_TRC("cpphy_channel_start_dma, init\n"); if(cppi->hw_state < CPPHY_HW_ST_OPENED) { /* hardware has never been opened, leave immediately */ DEB_WARN("[%s] hw not initialized (state %u)\n", __FUNCTION__, cppi->hw_state); ret = CPMAC_ERR_HW_NOT_INITIALIZED; } else { if(cppi->TxOpen) { DEB_WARN("[%s] tx, already open\n", __FUNCTION__); } else { /* Initialize buffer memory for the tx handling */ if((ret = cpphy_cppi_init_tcb(cppi)) == CPMAC_ERR_NOERR) { # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) CPMAC_TX_INTMASK_SET(p_dev->base_addr) = (1 << cppi->TxChannel); # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ DEB_TEST("Enable tx interrupts\n"); cppi->cpmac_priv->UR8_QUEUE->tx_int_enable_set.Bits.txcq0_int_enable = 1; # else /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ # warning "Tx-IRQ-Setup for unknown architecture missing" # endif /*--- #else ---*/ /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ cppi->TxOpen = 1; } else { DEB_ERR("[%s] failed!\n", __FUNCTION__); } } if(ret == CPMAC_ERR_NOERR) { if(cppi->RxOpen) { DEB_WARN("[%s] rx, already open\n", __FUNCTION__); } else { /* Initialize buffer memory for the rx handling */ # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) if((ret = cpphy_cppi_init_rcb(cppi, CPPHY_MAX_RX_BUFFERS, CPPHY_MAX_RX_BUFFER_SIZE)) == CPMAC_ERR_NOERR) { CPMAC_RX_INTMASK_SET(p_dev->base_addr) = (1 << 0); # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ if((ret = cpphy_if_g_init_rcb(cppi, CPPHY_MAX_RX_BUFFERS)) == CPMAC_ERR_NOERR) { cppi->cpmac_priv->UR8_QUEUE->rx_int_enable_set.Register = (1 << UR8_RX_QUEUE); # else /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ # warning "Rx-IRQ-Setup for unknown architecture missing" if(0) { # endif /*--- #else ---*/ /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ cppi->RxOpen = 1; } else { DEB_ERR("cpphy_cppi_init_rcb failed!\n"); if(cppi->TxOpen) { cppi->TxOpen = 0; cpphy_cppi_free_tcb(cppi); } } } } } # endif /*--- #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) ---*/ /* Not needed for this architecture */ return ret; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ void cpphy_cppi_rx_dma_pause(cpphy_cppi_t *cppi __attribute__ ((unused))) { # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) struct net_device *p_dev = cppi->cpmac_priv->owner; CPMAC_RX_INTMASK_CLEAR(p_dev->base_addr) = (1 << 0); # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ cppi->cpmac_priv->UR8_QUEUE->rx_int_enable_clear.Register = (1 << UR8_RX_QUEUE); # endif /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ void cpphy_cppi_rx_dma_restart(cpphy_cppi_t *cppi __attribute__ ((unused))) { # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) struct net_device *p_dev = cppi->cpmac_priv->owner; CPMAC_RX_INTMASK_SET(p_dev->base_addr) = (1 << 0); # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ cppi->cpmac_priv->UR8_QUEUE->rx_int_enable_set.Register = (1 << UR8_RX_QUEUE); # endif /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ } /*----------------------------------------------------------------------------------*\ * Start of Teardown. \*----------------------------------------------------------------------------------*/ cpmac_err_t cpphy_cppi_teardown(cpphy_cppi_t *cppi, unsigned int Mode) { int timeout = 0; # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) struct net_device *p_dev = cppi->cpmac_priv->owner; # endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ if(Mode & CPPHY_TX_TEARDOWN) { if(cppi->TxTeardownPending) { DEB_WARN("cpphy_cppi_teardown, tx already pending\n"); } else { if(cppi->hw_state < CPPHY_HW_ST_OPENED) { /* hardware has never been opened, leave immediately */ DEB_INFO("[%s] tx, hw in state %u\n", __FUNCTION__, cppi->hw_state); } else if(!cppi->TxOpen) { DEB_WARN("[%s] tx, already torn down\n", __FUNCTION__); } else { /* set teardown flag for int handler */ cppi->TxTeardownPending = Mode; /* request TX channel teardown */ # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) (CPMAC_TX_TEARDOWN(p_dev->base_addr)) = cppi->TxChannel; # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ # endif /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ /* if mode is blocking: wait until teardown has completed */ if(Mode & CPPHY_BLOCKING_TEARDOWN) { timeout = 0; while(cppi->TxTeardownPending & CPPHY_TX_TEARDOWN) { msleep_interruptible(2); timeout++; if(timeout > 100) { DEB_WARN("cpphy_cppi_teardown, break tx wait\n"); break; } } } } } } if(Mode & CPPHY_RX_TEARDOWN) { if(cppi->RxTeardownPending) { DEB_WARN("cpphy_cppi_teardown, rx already pending\n"); } else { if(cppi->hw_state < CPPHY_HW_ST_OPENED) { DEB_INFO("[%s] rx, hw in state %u\n", __FUNCTION__, cppi->hw_state); } else if(!cppi->RxOpen) { /* hardware has never been opened, leave immediately */ DEB_WARN("cpphy_cppi_teardown, rx, already torn down\n"); } else { /* set teardown flag for int handler */ cppi->RxTeardownPending = Mode; /* request RX channel teardown */ # if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) CPMAC_RX_TEARDOWN(p_dev->base_addr) = 0; # elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/ # endif /*--- #elif defined(CONFIG_MIPS_UR8) ---*/ /* if mode is blocking: wait until teardown has completed */ if(Mode & CPPHY_BLOCKING_TEARDOWN) { timeout = 0; while(cppi->RxTeardownPending & CPPHY_RX_TEARDOWN) { msleep_interruptible(2); timeout++; if(timeout > 100) { DEB_WARN("cpphy_cppi_teardown, break rx wait\n"); break; } } } } } } return CPMAC_ERR_NOERR; }