/*------------------------------------------------------------------------------------------*\
 * Copyright (C) 2006,2007,2008,2009,2010 AVM GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
\*------------------------------------------------------------------------------------------*/

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if !defined(CONFIG_NETCHIP_ADM69961)
#define CONFIG_NETCHIP_ADM69961
#endif

#include
#include

#include "cpmac_if.h"
#include "cpmac_const.h"
#include "cpmac_debug.h"
#include "cpmac_main.h"
#include "cpmac_reg.h"
#include "cpphy_const.h"
#include "cpphy_types.h"
#include "cpphy_mdio.h"
#include "cpphy_mgmt.h"
#include "cpphy_cppi.h"
#include "cpphy_main.h"
#include "cpphy_if.h"
#include "cpphy_if_g.h"
#include "cpphy_misc.h"
#include "adm6996.h"
#include "cpphy_adm6996.h"
#include "cpphy_ar8216.h"
#include "cpphy_switch.h"

#if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5))

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
void cpphy_if_free_tcb(cpphy_cppi_t *cppi, cpphy_tcb_t *tcb) {
    /* TODO: remove NULL checks; IRQ disabling not necessary? */
    if(tcb == NULL) {
        DEB_ERR("[%s] tcb == NULL\n", __FUNCTION__);
        return;
    }
    if(tcb->IsDynamicallyAllocated) {
        kfree((void *) tcb->KMallocPtr);
        cppi->support.tcbs_freed_dynamic++;
        return;
    }
    local_irq_disable();
    if(cppi->TxLastFree == NULL) {
        DEB_ERR("[%s] cppi->TxLastFree == NULL\n", __FUNCTION__);
        local_irq_enable();
        return;
    }
    tcb->Next = NULL;
    cppi->TxLastFree->Next = tcb;
    cppi->TxLastFree = tcb;
    cppi->support.tcbs_freed++;
    local_irq_enable();
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
cpphy_tcb_t *cpphy_if_alloc_tcb(cpphy_cppi_t *cppi) {
    cpphy_tcb_t *tcb;
    unsigned int tcbSize = sizeof(cpphy_tcb_t) + 0x3f; /* Enough size for aligning the struct */

    local_irq_disable();
    if(cppi->TxFirstFree->Next == NULL) {
        cpphy_tcb_t *ptr;
        /* TODO Make freeing the allocated space possible */
        ptr = kmalloc(tcbSize, GFP_ATOMIC);
        if(ptr == NULL) {
            local_irq_enable();
            DEB_ERR("[%s] Unable to allocate a new tcb!\n", __FUNCTION__);
            return NULL;
        }
        tcb = (cpphy_tcb_t *)(((unsigned int) ptr + 0x3f) & ~0x3f);
        dma_cache_wback_inv((unsigned long) tcb, sizeof(cpphy_tcb_t));
        tcb = (cpphy_tcb_t *) CPPHY_VIRT_TO_VIRT_NO_CACHE(tcb);
        tcb->KMallocPtr = (void *) ptr;
        tcb->IsDynamicallyAllocated = 1;
        cppi->support.tcbs_alloced_dynamic++;
        /*--- DEB_TEST("[cpphy_if_alloc_tcb] Allocated new tcb 0x%p\n", tcb); ---*/
        local_irq_enable();
        return tcb;
    }
    /*--- atomic_sub(1, &cppi->FreeTcbCount); ---*/
    tcb = (cpphy_tcb_t *) cppi->TxFirstFree;
    cppi->TxFirstFree = cppi->TxFirstFree->Next;
    cppi->support.tcbs_alloced++;
    local_irq_enable();
    return tcb;
}
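/*------------------------------------------------------------------------------------------*\
 * The dynamic path above over-allocates by 0x3f bytes and rounds the pointer up, so the
 * tcb starts on a 64-byte cache-line boundary before it is flushed and remapped uncached.
 * A minimal sketch of the same align-up arithmetic (align_up() and the sample addresses
 * are illustrative only, not driver API):
\*------------------------------------------------------------------------------------------*/
#if 0
static inline unsigned long align_up(unsigned long addr, unsigned long align) {
    /* align must be a power of two; 64 corresponds to the 0x3f mask used above */
    return (addr + (align - 1)) & ~(align - 1);
}
/* align_up(0x8001, 64) == 0x8040, align_up(0x8040, 64) == 0x8040 */
#endif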
/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static void cpphy_if_tx_stop_queue(cpphy_cppi_t *cppi) {
    t_cpphy_switch_config *switch_config = &cppi->mdio->switch_config;
    unsigned char dev_num;

    local_irq_disable();
    for(dev_num = 0; dev_num < switch_config->devices; dev_num++) {
        if(!netif_queue_stopped(switch_config->device[dev_num].net_device)) {
            DEB_TRC("[%s] Stopping queue for device '%s'\n",
                    __FUNCTION__, switch_config->device[dev_num].net_device->name);
            netif_stop_queue(switch_config->device[dev_num].net_device);
        }
    }
    DEB_TRC("[%s] Stopping tx queue\n", __FUNCTION__);
    netif_stop_queue(cppi->cpmac_priv->owner);
    local_irq_enable();
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static void cpphy_if_tx_restart_queue(cpphy_cppi_t *cppi) {
    local_irq_disable();
    if(netif_carrier_ok(cppi->cpmac_priv->owner)) {
        if(netif_queue_stopped(cppi->cpmac_priv->owner)) {
            t_cpphy_switch_config *switch_config = &cppi->mdio->switch_config;
            unsigned char dev_num;
            DEB_TRC("[%s] Starting tx queue\n", __FUNCTION__);
            netif_start_queue(cppi->cpmac_priv->owner);
            for(dev_num = 0; dev_num < switch_config->devices; dev_num++) {
                if(   netif_carrier_ok(switch_config->device[dev_num].net_device)
                   && netif_queue_stopped(switch_config->device[dev_num].net_device)) {
                    DEB_TRC("[%s] Restarting queue for device '%s'\n",
                            __FUNCTION__, switch_config->device[dev_num].net_device->name);
                    netif_start_queue(switch_config->device[dev_num].net_device);
                }
            }
        }
    }
    local_irq_enable();
}
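/*------------------------------------------------------------------------------------------*\
 * Sketch of the flow-control contract these two helpers serve: the xmit path stops the
 * queue when the hardware ring fills, the completion path wakes it again. example_xmit()
 * and tx_ring_full() are illustrative names, not part of this driver:
\*------------------------------------------------------------------------------------------*/
#if 0
static int example_xmit(struct sk_buff *skb, struct net_device *dev) {
    if(tx_ring_full(dev)) {         /* tx_ring_full() is hypothetical */
        netif_stop_queue(dev);      /* cpphy_if_tx_stop_queue() does this for every sub-device */
        return 1;                   /* busy: the stack requeues the skb and retries later */
    }
    /* ... hand the skb to the DMA ... */
    return 0;
}
/* The usual completion-side counterpart is netif_wake_queue(); cpphy_if_tx_restart_queue()
 * above restarts with netif_start_queue() once the carrier and queue states allow it. */
#endif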
/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
void cpphy_if_tx_complete(cpphy_cppi_t *cppi, struct sk_buff *skb, unsigned int status) {
    cpmac_priv_t *cpmac_priv = cppi->cpmac_priv;
    unsigned int enough_free;
    unsigned int prio_queue;

    if(skb == NULL) {
        DEB_ERR("[%s] skb is NULL!\n", __FUNCTION__);
        return;
    }
    prio_queue = skb->uniq_id >> 24;
    if(status == CPMAC_ERR_NOERR) {
        /*--- DEB_DEBUG("[%s] %u bytes sent\n", __FUNCTION__, skb->len); ---*/
        cppi->TxLastCompleted = jiffies;
        cpmac_priv->net_dev_stats.tx_packets++;
        cpmac_priv->net_dev_stats.tx_bytes += skb->len;
    } else {
        DEB_INFO("[%s] %u bytes dropped (%u)\n", __FUNCTION__, skb->len, status);
        cpmac_priv->local_stats_tx_errors++;
    }
    enough_free = !atomic_read(&cppi->TxPrioQueues.q[prio_queue].DMAFree);
    atomic_inc(&cppi->TxPrioQueues.q[prio_queue].DMAFree);
    cppi->TxPrioQueues.q[prio_queue].BytesDequeued += skb->len;
#   if defined(CONFIG_AVM_SIMPLE_PROFILING)
    skb_trace(skb, 20);
#   endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
    dev_kfree_skb_any(skb);
    /* The wakeup of the queue should happen here, if there is more than one caller of this function */
    if(enough_free) {
        cpphy_if_tx_restart_queue(cppi);
        cpphy_if_data_from_queues(cppi);
    }
}
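/*------------------------------------------------------------------------------------------*\
 * tx_complete restarts the queues only on the full -> non-full transition: enough_free is
 * sampled *before* DMAFree is incremented, so the wakeup fires exactly once per filled
 * ring. The idiom in isolation (ring_free stands in for TxPrioQueues.q[n].DMAFree):
\*------------------------------------------------------------------------------------------*/
#if 0
static atomic_t ring_free = ATOMIC_INIT(0);
static void example_complete_one(void) {
    int was_full = !atomic_read(&ring_free); /* 0 free slots means the ring was full */
    atomic_inc(&ring_free);
    if(was_full) {
        /* wake the stack: cpphy_if_tx_restart_queue() + cpphy_if_data_from_queues() */
    }
}
#endif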
/*------------------------------------------------------------------------------------------*\
 * Garbage collect tx DMA queue
\*------------------------------------------------------------------------------------------*/
#if 0
#if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO)
void cpphy_if_gc_tx_queue(cpphy_cppi_t *cppi) {
    register cpphy_tcb_t *tcb;
    static atomic_t only_one = { counter: 0 };

    if(atomic_add_return(1, &only_one) != 1) {
        return;
    }
    while((tcb = (cpphy_tcb_t *) cppi->TxFirst) != NULL) {
        dma_cache_inv((unsigned long)(void *)tcb, sizeof(cpphy_tcb_t));
        if(tcb->mode & CB_OWNERSHIP_BIT) {
            atomic_set(&only_one, 0);
            return;
        }
        if(tcb->skb != NULL) {
            cpphy_if_tx_complete(cppi, (struct sk_buff *) tcb->skb, CPMAC_ERR_NOERR);
        }
        tcb->skb = NULL;
        /*----------------------------------------------------------------------------------*\
         * The last entry stays in the queue. Therefore the TxLast pointer does not need
         * to be touched.
        \*----------------------------------------------------------------------------------*/
        if(tcb == cppi->TxLast) {
            atomic_set(&only_one, 0);
            return;
        }
        cppi->TxFirst = tcb->Next;
        /*--- cpphy_if_free_tcb(tcb); ---*/
    }
    atomic_set(&only_one, 0);
    return;
}
#endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/
#endif /*--- #if 0 ---*/

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
inline void cpphy_if_tcb_enqueue(cpphy_cppi_t *cppi, unsigned char priority, cpphy_tcb_t *tcb) {
    cpphy_tcb_queue_t *queue = &cppi->TxPrioQueues.q[priority];
    unsigned long flags;

    spin_lock_irqsave(&queue->lock, flags);
    tcb->Next = NULL;
    if(queue->Last != NULL) {
        queue->Last->Next = tcb;
    }
    queue->Last = tcb;
    if(queue->First == NULL) {
        queue->First = tcb;
    }
    atomic_dec(&queue->Free);
    atomic_dec(&cppi->TxPrioQueues.SummedFree);
    spin_unlock_irqrestore(&queue->lock, flags);
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static inline cpphy_tcb_t *cpphy_if_tcb_dequeue(cpphy_cppi_t *cppi, unsigned char priority) {
    cpphy_tcb_queue_t *queue = &cppi->TxPrioQueues.q[priority];
    cpphy_tcb_t *tcb;
    unsigned long flags;

    spin_lock_irqsave(&queue->lock, flags);
    tcb = (cpphy_tcb_t *) queue->First;
    if(queue->First != NULL) {
        queue->First = queue->First->Next;
        atomic_inc(&cppi->TxPrioQueues.SummedFree);
        atomic_inc(&queue->Free);
    }
    if(queue->First == NULL) {
        queue->Last = NULL;
    }
    spin_unlock_irqrestore(&queue->lock, flags);
    return tcb;
}
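/*------------------------------------------------------------------------------------------*\
 * The pair above implements a spinlock-protected singly linked FIFO (First/Last pointers;
 * the Free counters track the remaining capacity). Caller-side sketch; the function name
 * and the WAN priority are merely example values:
\*------------------------------------------------------------------------------------------*/
#if 0
static void example_fifo_usage(cpphy_cppi_t *cppi, cpphy_tcb_t *tcb) {
    cpphy_if_tcb_enqueue(cppi, CPPHY_PRIO_QUEUE_WAN, tcb);  /* producer: xmit path */
    tcb = cpphy_if_tcb_dequeue(cppi, CPPHY_PRIO_QUEUE_WAN); /* consumer: dequeue loop; NULL when empty */
}
#endif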
/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
#if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO)
void cpphy_if_data_to_phy_dma(cpphy_cppi_t *cppi, cpphy_tcb_t *newtcb) {
    cpphy_tcb_t *TmpPrevEnqueue;
    cpmac_priv_t *cpmac_priv = cppi->cpmac_priv;
    unsigned int base = cpmac_priv->owner->base_addr;
    cpphy_tcb_t *tcb;

    tcb = (cpphy_tcb_t *) cppi->TxPrevEnqueue->Next;
    /* Setup Tx mode and size */
    memcpy(tcb, newtcb, 16);
    tcb->skb = newtcb->skb;
    cpphy_if_free_tcb(cppi, newtcb);
    /* increment management ptr */
    TmpPrevEnqueue = cppi->TxPrevEnqueue;
    cppi->TxPrevEnqueue = tcb;
    /* dma may go on by chaining the current tx descriptor */
    *((volatile unsigned int *) CPPHY_VIRT_TO_VIRT_NO_CACHE(&TmpPrevEnqueue->HNext)) = CPPHY_VIRT_TO_PHYS(tcb);
    /* If tx complete int handling has stopped, restart here. */
    /* It does not work to check CB_EOQ_BIT here and restart dma if the bit is set: bug of the cpmac? */
    if(!cppi->TxDmaActive) {
        cppi->TxDmaActive = 1;
        /* write CPPI TX HDP */
        (CPMAC_TX_HDP(base, cppi->TxChannel)) = CPPHY_VIRT_TO_PHYS(tcb);
    }
}
#endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
#if defined(CPMAC_DMA_TX_PRIOQUEUE_DEBUG)
static char buf[60];
static int bufidx = 0;
static void log_to_dmaqueue(int priority) {
    if(bufidx == sizeof(buf) - 1) {
        buf[bufidx] = 0;
        DEB_INFO("cpmac: tx %s\n", buf);
        bufidx = 0;
    }
    buf[bufidx++] = '0' + (priority % CPPHY_PRIO_QUEUES);
}
#endif /*--- #if defined(CPMAC_DMA_TX_PRIOQUEUE_DEBUG) ---*/
#endif /*--- #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) ---*/

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
unsigned int cpphy_if_check_external_tagging(cpphy_cppi_t *cppi, cpphy_tcb_t *tcb) {
    cpphy_mdio_t *mdio = cppi->mdio;

    if((tcb != NULL) && (CPMAC_VLAN_IS_802_1Q_FRAME(tcb->skb->data))) {
        /* VLAN should be used */
        unsigned char is_tagged = mdio->switch_config.wanport_default_vid != CPMAC_VLAN_GET_VLAN_ID(tcb->skb->data);
        unsigned char keep_tagging;
        assert(mdio->switch_config.wanport < AVM_CPMAC_MAX_PORTS); /* To ease Klocwork checking */
        keep_tagging = mdio->switch_status.port[mdio->switch_config.wanport].keep_tag_outgoing;
        if(keep_tagging != is_tagged) {
            DEB_TRC("[%s] Need to switch tagging\n", __FUNCTION__);
            /* Need to change the keep_tagging setting */
            if(   (unsigned int) atomic_read(&cppi->TxPrioQueues.q[CPPHY_PRIO_QUEUE_WAN].DMAFree)
               != cppi->TxPrioQueues.q[CPPHY_PRIO_QUEUE_WAN].MaxDMAFree) {
                /* Wait until there are no more WAN packets in the DMA
                 * queue before the tagging can be switched. */
                DEB_TRC("[%s] Waiting until the WAN DMA queue is empty\n", __FUNCTION__);
                return 0;
            }
            if(mdio->wan_tagging_enable != is_tagged) {
                mdio->wan_tagging_enable = is_tagged;
                if(mdio->f->set_wan_keep_tagging) {
                    cpphy_mgmt_work_add(mdio, CPMAC_WORK_TOGGLE_VLAN, mdio->f->set_wan_keep_tagging, 0);
                }
            }
            return 0;
        }
    }
    return 1;
}
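/*------------------------------------------------------------------------------------------*\
 * CPMAC_VLAN_IS_802_1Q_FRAME() and CPMAC_VLAN_GET_VLAN_ID() come from the cpmac headers.
 * For reference, a typical implementation over a raw Ethernet frame looks like this
 * (EXAMPLE_* names are illustrative): bytes 12/13 carry the TPID 0x8100, bytes 14/15 the
 * TCI with the VLAN ID in its low 12 bits.
\*------------------------------------------------------------------------------------------*/
#if 0
#define EXAMPLE_IS_802_1Q_FRAME(data) \
    ((((unsigned char *)(data))[12] == 0x81) && (((unsigned char *)(data))[13] == 0x00))
#define EXAMPLE_GET_VLAN_ID(data) \
    (((((unsigned char *)(data))[14] & 0x0f) << 8) | ((unsigned char *)(data))[15])
#endif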
#if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5))
/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
void cpphy_if_data_from_queues(cpphy_cppi_t *cppi) {
    cpphy_tcb_t *tcb;
    unsigned int priority, count;

    if(atomic_add_return(1, &cppi->dequeue_running) != 1) {
        DEB_ERR("[%s] conflict\n", __FUNCTION__); /* FIXME */
        return;
    }
    /* Part 2: Find the next skb from the appropriate queue and send it */
    for( ;; ) {
        count = 0;
        for(priority = 0; priority < CPPHY_PRIO_QUEUES; priority++) {
            if(atomic_read(&cppi->TxPrioQueues.q[priority].Free) == (int) cppi->TxPrioQueues.q[priority].MaxSize) {
                continue; /* TxPrioQueue is empty */
            }
            if(atomic_read(&cppi->TxPrioQueues.q[priority].DMAFree) <= 0) {
                DEB_DEBUG("[%s] DMA queue %u full\n", __FUNCTION__, priority);
                /* Always stopping the whole cpmac0 queue when one priority
                 * queue is full is a problem, because that would prevent
                 * traffic for the other priority queues.
                 * As the main reason for the stop_queue is detecting the UR8
                 * LAN tx hangup, this is only done when the current priority
                 * queue is full and no packets have been completed by the UR8
                 * hardware. */
                if(time_after(jiffies, cppi->TxLastCompleted + CPMAC_TX_TIMEOUT)) {
                    DEB_TRC("[%s] Queue %u full, completion outstanding. Stopping tx queue.\n", __FUNCTION__, priority);
                    cpphy_if_tx_stop_queue(cppi);
                }
                continue;
            }
            count++;
            if(!cppi->TxPrioQueues.q[priority].Pause) {
                tcb = (cpphy_tcb_t *) cppi->TxPrioQueues.q[priority].First;
                /* Find out whether the tagging has to be changed */
                if(   cppi->mdio->f->check_external_tagging
                   && (priority == CPPHY_PRIO_QUEUE_WAN)) { /* Is this WAN traffic? */
                    if(!cppi->mdio->f->check_external_tagging(cppi, tcb)) {
                        count--;
                        continue;
                    }
                }
                tcb = cpphy_if_tcb_dequeue(cppi, priority);
                if(tcb != NULL) {
                    atomic_dec(&cppi->TxPrioQueues.q[priority].DMAFree);
#                   ifdef CPMAC_DMA_TX_PRIOQUEUE_DEBUG
                    log_to_dmaqueue(priority);
#                   endif
#                   if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO)
                    cpphy_if_data_to_phy_dma(cppi, tcb);
#                   elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/
                    cpphy_if_g_data_to_phy_dma(cppi, tcb);
#                   else /*--- #elif defined(CONFIG_MIPS_UR8) ---*/
#                   warning "Missing cpphy_if_g_data_to_phy_dma for this architecture"
#                   endif /*--- #else ---*/ /*--- #elif defined(CONFIG_MIPS_UR8) ---*/
                }
                cppi->TxPrioQueues.q[priority].Pause = cppi->TxPrioQueues.q[priority].PauseInit;
            } else {
                assert(cppi->TxPrioQueues.q[priority].Pause <= cppi->TxPrioQueues.q[priority].PauseInit);
                cppi->TxPrioQueues.q[priority].Pause--;
            }
        }
        if((count == 0) && (atomic_sub_return(1, &cppi->dequeue_running) == 0)) {
            break;
        }
    }
    /* Part 3 (optional): Reschedule queue priorities */
}
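/*------------------------------------------------------------------------------------------*\
 * The Pause/PauseInit counters above implement a simple weighting between the priority
 * queues: a queue with PauseInit == N may dequeue on only one of every N + 1 scheduling
 * rounds, so a smaller PauseInit means a larger bandwidth share. The mechanism in
 * isolation (struct and names are illustrative):
\*------------------------------------------------------------------------------------------*/
#if 0
struct example_queue { unsigned int pause, pause_init; };
static int example_may_send(struct example_queue *q) {
    if(q->pause == 0) {
        q->pause = q->pause_init; /* sent: skip the next pause_init rounds */
        return 1;
    }
    q->pause--;
    return 0;
}
#endif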
/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
#if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO)
cpmac_err_t cpphy_if_data_to_phy(cpmac_phy_handle_t phy_handle, struct sk_buff *skb) {
    cpphy_cppi_t *cppi = &((cpphy_global_t *) phy_handle)->cppi;
    cpphy_mdio_t *mdio = &((cpphy_global_t *) phy_handle)->mdio;
    unsigned int priority, frame_length;
    cpphy_tcb_t *tcb;

    if(!cppi->TxOpen || cppi->TxTeardownPending || (mdio->state != CPPHY_MDIO_ST_LINKED)) {
        DEB_INFO("[%s] Channel closing or not opened (TxOpen = %#x, TeardownPend = %#x, state = %#x)\n",
                 __FUNCTION__, cppi->TxOpen, cppi->TxTeardownPending, mdio->state);
        return CPMAC_ERR_CHAN_NOT_OPEN;
    }
    /* We do not want a zero length packet */
    if(CPMAC_VLAN_IS_0_LEN_FRAME(skb->data)) {
#       if defined(CONFIG_AVM_SIMPLE_PROFILING)
        skb_trace(skb, 21);
#       endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
        dev_kfree_skb_any(skb);
        cppi->cpmac_priv->local_stats_tx_errors++;
        DEB_TRC("[%s] packet dropped!\n", __FUNCTION__);
        return CPMAC_ERR_NOERR;
    }
    /******************************************\
     * Part 1: Add skb(s) to correct queue(s) *
    \******************************************/
    /* Check which queue to use */
    priority = skb->uniq_id >> 24;
    assert(priority < CPPHY_PRIO_QUEUES);
    /* supply the minimum ether frame size */
    frame_length = skb->len;
    if(frame_length < 60) {
        /*--- if(mdio->pad_on) { ---*/
        /*--- DEB_TEST("[cpphy_if_data_to_phy] (%u) before padding: %64B\n", skb->len, skb->len, skb->data); ---*/ /* FIXME */
        /*--- skb = skb_padto(skb, 60); ---*/ /* + 4 byte hardware added fcs -> min frame length of 64 bytes */
        /*--- if(skb == NULL) { ---*/
        /*--- DEB_ERR("Out of memory when trying to pad a short packet in the send path.\n"); ---*/
        /*--- return CPMAC_ERR_NOMEM; ---*/
        /*--- } ---*/
        /*--- DEB_TEST("[cpphy_if_data_to_phy] (%u) after padding : %64B\n", skb->len, 64, skb->data); ---*/ /* FIXME */
        /*--- } ---*/
        frame_length = 60; /* + 4 byte hardware added fcs -> min frame length of 64 bytes */
    }
    /* prepare tx data to be available for dma */
    dma_cache_wback_inv((unsigned long) skb->data, frame_length);
    if(atomic_read(&cppi->TxPrioQueues.q[priority].Free) == 0) return CPMAC_ERR_NO_BUFFER;
    /* Allocate a tcb and set it up with the skb data */
    tcb = cpphy_if_alloc_tcb(cppi);
    if(tcb == NULL) {
        return CPMAC_ERR_NO_BUFFER;
    }
    tcb->HNext = 0;
    tcb->BufPtr = CPPHY_VIRT_TO_PHYS((unsigned int *) skb->data);
    tcb->Off_BLen = frame_length;
    tcb->mode = (frame_length | CB_SOF_BIT | CB_EOF_BIT | CB_OWNERSHIP_BIT);
    tcb->Next = NULL;
    tcb->skb = skb;
    /* Enqueue the tcb to the corresponding priority queue */
    cpphy_if_tcb_enqueue(cppi, priority, tcb);
    cppi->TxPrioQueues.q[priority].BytesEnqueued += skb->len;
    cpphy_if_data_from_queues(cppi);
    return CPMAC_ERR_NOERR;
}
#endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/
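/*------------------------------------------------------------------------------------------*\
 * Short frames: the active code above only reports a DMA length of 60, so the bytes between
 * skb->len and 60 are whatever happens to follow in the buffer. A sketch of explicit
 * zero-padding, matching the old skb_padto() convention used by the commented-out block
 * (it returns the possibly reallocated skb, or NULL on failure):
\*------------------------------------------------------------------------------------------*/
#if 0
if(skb->len < 60) {
    skb = skb_padto(skb, 60); /* zero-pads the tail up to the 60 byte minimum */
    if(skb == NULL) {
        return CPMAC_ERR_NOMEM;
    }
    frame_length = 60; /* + 4 byte hardware fcs -> 64 byte minimum on the wire */
}
#endif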
/*----------------------------------------------------------------------------------*\
\*----------------------------------------------------------------------------------*/
cpmac_err_t cpphy_if_control_req(cpmac_phy_handle_t phy_handle, cpmac_control_req_t control, ...) {
    va_list param;
    cpmac_err_t ret = CPMAC_ERR_NOERR;
    cpphy_mdio_t *mdio = &((cpphy_global_t *) phy_handle)->mdio;
    cpphy_cppi_t *cppi = &((cpphy_global_t *) phy_handle)->cppi;

    switch(control) {
        case CPMAC_CONTROL_REQ_IS_SWITCH:
            if(!mdio->switch_config.is_switch) {
                ret = CPMAC_ERR_NO_SWITCH;
            }
            break;
        case CPMAC_CONTROL_REQ_MULTI_SINGLE:
            cpphy_cppi_set_multi_promiscous(&((cpphy_global_t *) phy_handle)->cppi, 2, 0);
            break;
        case CPMAC_CONTROL_REQ_MULTI_ALL:
            cpphy_cppi_set_multi_promiscous(&((cpphy_global_t *) phy_handle)->cppi, 1, 0);
            break;
        case CPMAC_CONTROL_REQ_PROMISCOUS:
            cpphy_cppi_set_multi_promiscous(&((cpphy_global_t *) phy_handle)->cppi, 0, 1);
            break;
        case CPMAC_CONTROL_REQ_HW_STATUS:
            {
                struct net_device_stats *stats;
                va_start(param, control);
                stats = va_arg(param, void *);
                stats->collisions = 0;
                stats->rx_crc_errors = 0;
                stats->rx_dropped = 0;
                stats->rx_errors = 0;
                stats->rx_fifo_errors = 0;
                stats->rx_frame_errors = 0;
                stats->rx_length_errors = 0;
                stats->rx_missed_errors = 0;
                stats->rx_over_errors = 0;
                stats->tx_carrier_errors = 0;
                stats->tx_errors = 0;
                stats->tx_fifo_errors = 0;
                if(mdio->f->update_hw_status) {
                    mdio->f->update_hw_status(mdio, stats);
                }
                cpphy_cppi_update_hw_status(cppi, stats);
                va_end(param);
            }
            break;
        case CPMAC_CONTROL_REQ_TEARDOWN:
            {
                unsigned int mode;
                va_start(param, control);
                mode = va_arg(param, unsigned int);
                ret = cpphy_cppi_teardown(cppi, mode);
                va_end(param);
            }
            break;
        case CPMAC_CONTROL_REQ_START_DMA:
            ret = cpphy_cppi_start_dma(&((cpphy_global_t *) phy_handle)->cppi);
            break;
        case CPMAC_CONTROL_REQ_PORT_COUNT:
            /* to check: adjust port count in case of empty cpphy */
            /* design: abuse ret as port count */
            /*--- ret = (cpmac_err_t)((cpphy_global_t *) phy_handle)->cpmac_switch ? 4 : 1; ---*/
            /* TODO Should this depend on the device/PHY? */
            ret = (cpmac_err_t) cpmac_global.ports;
            break;
        case CPMAC_CONTROL_REQ_GENERIC_CONFIG:
            {
                void *ioctl_struct;
                struct avm_cpmac_ioctl_struct avm_ioctl;
                va_start(param, control);
                ioctl_struct = va_arg(param, void *);
                copy_from_user((void *) &avm_ioctl, ioctl_struct, sizeof(struct avm_cpmac_ioctl_struct));
                ret = cpphy_switch_ioctl(mdio, &avm_ioctl);
                copy_to_user(ioctl_struct, (void *) &avm_ioctl, sizeof(struct avm_cpmac_ioctl_struct));
                va_end(param);
            }
            break;
        default:
            DEB_INFO("[%s] unhandled control %u\n", __FUNCTION__, control);
            ret = CPMAC_ERR_ILL_CONTROL;
            break;
    }
    return ret;
}
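/*----------------------------------------------------------------------------------*\
 * Caller-side sketch for the vararg control interface above; the handle comes from
 * the cpmac core. For CPMAC_CONTROL_REQ_HW_STATUS the callee clears and then fills
 * the passed net_device_stats (example_query_hw_status() is illustrative):
\*----------------------------------------------------------------------------------*/
#if 0
static void example_query_hw_status(cpmac_phy_handle_t phy_handle) {
    struct net_device_stats stats;
    if(cpphy_if_control_req(phy_handle, CPMAC_CONTROL_REQ_HW_STATUS, &stats) == CPMAC_ERR_NOERR) {
        /* stats.rx_errors, stats.collisions, ... now reflect the hardware counters */
    }
}
#endif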
/*----------------------------------------------------------------------------------*\
\*----------------------------------------------------------------------------------*/
void cpphy_if_isr_end(cpmac_phy_handle_t phy_handle) {
    CPMAC_MAC_EOI_VECTOR(((cpphy_global_t *) phy_handle)->cpmac_priv->owner->base_addr) = 0;
}

/*----------------------------------------------------------------------------------*\
\*----------------------------------------------------------------------------------*/
#if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO)
void cpphy_if_isr_tasklet(unsigned long context) {
    cpmac_phy_handle_t phy_handle = (cpmac_phy_handle_t) context;
    unsigned int IntVec;
    cpphy_cppi_t *cppi = &((cpphy_global_t *) phy_handle)->cppi;
    cpmac_priv_t *cpmac_priv = ((cpphy_global_t *) phy_handle)->cpmac_priv;

    /* Verify proper device state - important because a call prior to Open would *\
    \* result in a lockup                                                        */
    if(cppi->hw_state != CPPHY_HW_ST_OPENED) return;
    IntVec = CPMAC_MAC_IN_VECTOR(cpmac_priv->owner->base_addr);
    if(IntVec == 0) {
        return;
    }
    /*--- DEB_TRC("[cpphy_if_isr_tasklet] int vec 0x%X\n", IntVec); ---*/
    if(IntVec & MAC_IN_VECTOR_TX_INT_OR) { /* only chan 0 in use */
        cpphy_cppi_tx_int(cppi);
    }
    if(IntVec & MAC_IN_VECTOR_RX_INT_OR) { /* only chan 0 in use */
        if((IntVec >> 8) & 0x7) {
            DEB_ERR("[%s] unexpected rx chan %u\n", __FUNCTION__, (IntVec >> 8) & 0x7);
        } else {
            cpphy_cppi_rx_int(cppi);
        }
    }
    if(IntVec & MAC_IN_VECTOR_HOST_INT) { /* severe problem */
        DEB_ERR("[%s] status %08X for base:%X\n", __FUNCTION__,
                CPMAC_MACSTATUS(cpmac_priv->owner->base_addr),
                (unsigned int) cpmac_priv->owner->base_addr);
        /* to do: reset hardware */
    }
    if(IntVec & (MAC_IN_VECTOR_TX_INT_OR | MAC_IN_VECTOR_RX_INT_OR | MAC_IN_VECTOR_HOST_INT)) {
        CPMAC_MAC_EOI_VECTOR(cpmac_priv->owner->base_addr) = 0;
    }
}
#endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/
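/*----------------------------------------------------------------------------------*\
 * cpphy_if_isr_tasklet() is the bottom half; registration and tasklet_init() happen
 * outside this file (see the note in cpphy_if_init() below). A sketch of the
 * hard-irq half that would schedule it (example_isr/example_tasklet are
 * illustrative names):
\*----------------------------------------------------------------------------------*/
#if 0
static struct tasklet_struct example_tasklet;
/* tasklet_init(&example_tasklet, cpphy_if_isr_tasklet, (unsigned long) phy_handle); */
static irqreturn_t example_isr(int irq, void *dev_id) {
    tasklet_schedule(&example_tasklet); /* defer register reads and CPPI work to softirq context */
    return IRQ_HANDLED;
}
#endif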
/*----------------------------------------------------------------------------------*\
\*----------------------------------------------------------------------------------*/
cpmac_err_t cpphy_if_init(cpmac_phy_handle_t phy_handle, cpmac_priv_t *cpmac_priv) {
    cpmac_err_t ret = CPMAC_ERR_NOERR;
    struct net_device *p_dev = cpmac_priv->owner;
    char *mac_name = NULL;
    char *mac_string = NULL;

    ((cpphy_global_t *) phy_handle)->cpmac_priv = cpmac_priv;
    ((cpphy_global_t *) phy_handle)->cppi.cpmac_priv = cpmac_priv;
    /* reset of cpmac and phy */
#   if defined(CONFIG_MIPS_AR7)
    if(((cpphy_global_t *) phy_handle)->high_phy) {
        cpmac_priv->mac_reset_bit = EMACB_RESET_BIT;
        /* to check: reset although no internal phy on high cpmac */
        cpmac_priv->phy_reset_bit = -1; /* signal: no bit to reset */
        cpmac_priv->mac_pdown_bit = PDCR_BIT_EMAC1;
        cpmac_priv->intr = AR7INT_CPMAC1;
        p_dev->if_port = CPPHY_CPMAC_HIGH_PORT_ID;
        p_dev->base_addr = AR7_CPMAC1_BASE;
        mac_name = "macb";
    } else
#   endif /*--- #if defined(CONFIG_MIPS_AR7) ---*/
#   if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO)
    {
        cpmac_priv->mac_reset_bit = EMACA_RESET_BIT;
        cpmac_priv->phy_reset_bit = EMAC_PHY_RESET_BIT;
        cpmac_priv->mac_pdown_bit = PDCR_BIT_EMAC0;
#       if defined(CONFIG_MIPS_AR7)
        cpmac_priv->intr = AR7INT_CPMAC0;
        p_dev->base_addr = AR7_CPMAC0_BASE;
#       else /*--- #if defined(CONFIG_MIPS_AR7) ---*/
        cpmac_priv->intr = OHIOINT_CPMAC0;
        p_dev->base_addr = OHIO_CPMAC0_BASE;
#       endif /*--- #else ---*/ /*--- #if defined(CONFIG_MIPS_AR7) ---*/
        p_dev->if_port = CPPHY_CPMAC_LOW_PORT_ID;
        mac_name = "maca";
    }
    avm_reset_device(cpmac_priv->mac_reset_bit, 100);
    if(cpmac_priv->phy_reset_bit != -1) {
        avm_reset_device(cpmac_priv->phy_reset_bit, 100);
    }
#   elif defined(CONFIG_MIPS_UR8) /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/
    cpmac_priv->UR8_QUEUE = (struct ur8_queue_manager *) UR8_NWSS_QUEUE;
    cpmac_priv->UR8_NWSS = (struct ur8_nwss_register *)&(*(volatile unsigned int *)(UR8_NWSS_BASE));
    if(((cpphy_global_t *) phy_handle)->mdio.high_phy) {
        cpmac_priv->CPGMAC_F = (struct cpgmac_f_regs *) UR8_CPMAC1_BASE;
        mac_name = "macb";
    } else {
        cpmac_priv->CPGMAC_F = (struct cpgmac_f_regs *) UR8_CPMAC0_BASE;
        mac_name = "maca";
    }
#   else /*--- #elif defined(CONFIG_MIPS_UR8) ---*/
#   warning "No reset routine for PHY"
    mac_name = "maca";
#   endif /*--- #else ---*/ /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/
    /*--- wait 100 ms ---*/
    set_current_state(TASK_INTERRUPTIBLE);
    schedule_timeout(HZ / 10);
#   if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO)
    avm_take_device_out_of_power_down(cpmac_priv->mac_pdown_bit);
#   endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/
    mac_string = prom_getenv(mac_name);
    if(!mac_string) {
        mac_string = "08.00.28.32.06.02";
        DEB_ERR("Error getting mac from boot environment for %s\n", p_dev->name);
        DEB_ERR("Using default mac address: %s\n", mac_string);
        DEB_ERR("Use the bootloader command:\n");
        DEB_ERR("    setenv %s xx.xx.xx.xx.xx.xx\n", mac_name);
        DEB_ERR("to set the mac address\n");
    }
    DEB_INFO("[%s] dev %s has mac addr: %s\n", __FUNCTION__, p_dev->name, mac_string);
    cpphy_misc_str2eaddr(p_dev->dev_addr, mac_string);
    /* Use the external PHY only for the switch variants of the FBox */
#   if defined(CONFIG_MIPS_OHIO)
    if(((cpphy_global_t *) phy_handle)->mdio.switch_config.is_switch) {
        /* Reset the PHY and leave it in reset state, because we use the external PHY */
        if(cpmac_priv->phy_reset_bit != -1) {
            avm_put_device_into_reset(cpmac_priv->phy_reset_bit);
        }
        /* Delay 200ms */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(HZ / 5);
        /* MII pins are connected to the MII interface on the EMAC0 module */
        *((volatile unsigned int *) OHIO_MII_SEL_REG) = 1;
    }
#   endif /*--- #if defined(CONFIG_MIPS_OHIO) ---*/
#   if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO)
    /* The tasklet is initialized at isr registration time. */
    cpmac_priv->dev_size = 0x800; /* to check */
    request_mem_region(p_dev->base_addr, cpmac_priv->dev_size, p_dev->name);
#   endif /*--- #if defined(CONFIG_MIPS_AR7) || defined(CONFIG_MIPS_OHIO) ---*/
    cpphy_main_open((cpphy_global_t *) phy_handle, p_dev);
    return ret;
}

/*----------------------------------------------------------------------------------*\
\*----------------------------------------------------------------------------------*/
void cpphy_if_deinit(cpmac_phy_handle_t phy_handle) {
    cpmac_priv_t *cpmac_priv = ((cpphy_global_t *) phy_handle)->cpmac_priv;
    struct net_device *p_dev = (struct net_device *) (cpmac_priv->owner);
    cpphy_cppi_t *cppi = &((cpphy_global_t *) phy_handle)->cppi;

    DEB_TRC("[%s]\n", __FUNCTION__);
    cpphy_main_close(cppi);
    DEB_INFO("[%s] device %s closed\n", __FUNCTION__, p_dev->name);
    /* Buffer/descriptor resources may still need to be freed if a Close Mode 1
     * was performed prior to Shutdown - clean up here */
    if(cppi->RcbStart) {
        cpphy_cppi_free_rcb(cppi);
    }
    if(cppi->TcbStart) {
        cpphy_cppi_free_tcb(cppi);
    }
    release_mem_region(p_dev->base_addr, cpmac_priv->dev_size);
}
#endif /*--- #if !(defined(CONFIG_FUSIV_VX180) || defined(CONFIG_FUSIV_VX185) || defined(CONFIG_ARCH_PUMA5)) ---*/