/*
 *  GPL LICENSE SUMMARY
 *
 *  Copyright(c) 2012-2015 Intel Corporation. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *  The full GNU General Public License is included in this distribution
 *  in the file called LICENSE.GPL.
 *
 *  Contact Information:
 *    Intel Corporation
 *    2200 Mission College Blvd.
 *    Santa Clara, CA 97052
 */

/* UDMA Driver main stack */

/*
 * Kernel headers needed by this file. The original bracket-include list was
 * not preserved, so the set below is inferred from the APIs used here.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>

#include "udma_hw.h"
#include "udma_main.h"

#define PDE_DATA(_inode) (PDE(_inode)->data)

/* Rx buffer size definitions */
#define UDMA_MIN_RX_SIZE	(1522)   // 1500 MTU + 18 HDR + 4 VLAN
#define UDMA_MAX_RX_SIZE	(9216)

/* Interrupt Throttle Rate (ITR) constants */
#define ITR_LOW_INT_FREQ	(10000)  // Low latency interrupt frequency
#define INTR_FREQ_MIN_FIXED	(1000)   // Min. interrupt frequency
#define INTR_FREQ_MAX_FIXED	(10000)  // Max. interrupt frequency
#define ITR_AVG_PKT_SIZE	(1538)   // Average packet size in bytes
#define ITR_DEFAULT_PPC		(20)     // Packets per chain
#define ITR_MIN_MBPS		(100)    // Min. data rate in Mbps
#define ITR_MAX_MBPS		(1200)   // Max. data rate in Mbps
#define ITR_MIN_STEP_MBPS	(50)     // Step increment in Mbps
#define ITR_DEFAULT_MIN_MBPS	(200)    // Default minimum Mbps
#define ITR_DEFAULT_MAX_MBPS	(800)    // Default maximum Mbps
#define ITR_DEFAULT_STEP_MBPS	(100)    // Default step increment in Mbps

#define ONE_SEC_TO_NS		(1000000000)
#define MAX_UDMA_STOP_DELAY	(200)

static u32 burst_sizes[] = {4, 8, 16, 32, 64, 128};
static u32 gap_values[]  = {0, 16, 64, 256, 1024, 2048, 4096, 8192};

static void udma_start_rx_transfer(struct udma_device *umdev);
static void udma_restart_rx_transfer(struct udma_device *umdev);
static void udma_continue_rx_transfer(struct udma_device *umdev);
static void udma_start_tx_transfer(struct udma_device *umdev);
static int udma_allocate_rx_buffers(struct udma_device *udma_dev, int count);
static int udma_setup_resources(struct udma_device *udma_dev);
static void udma_free_resources(struct udma_device *udma_dev);

#ifdef DEBUG
#define UDMA_WARN_ON_RETURN(condition, ret) ({ \
	int __ret_warn_on = !!(condition); \
	if (__ret_warn_on) \
		return ret; \
})

static inline void udma_print_desc(struct udma_desc *desc)
{
	udma_info("desc : 0x%x\n", (u32)desc);
	udma_info("next_desc : 0x%x\n", (u32)desc->next_desc);
	udma_info("src : 0x%x\n", desc->src);
	udma_info("dest : 0x%x\n", desc->dst);
	udma_info("size : 0x%x\n", desc->union_field.size);
	udma_info("flags : 0x%x\n\n", (u32)desc->flags);
}

static void udma_print_ring(struct udma_queue *q, bool print_descs)
{
	struct udma_ring *ring = &q->ring;

	udma_info("\n ======================= Ring Info =======================\n");
	udma_info(" dma : 0x%x\n", (u32)ring->dma);
	udma_info(" desc : 0x%p\n", ring->desc);
	udma_info(" direction : %d\n", q->direction);
	udma_info(" dma_size : 0x%x\n", ring->dma_size);
	udma_info(" entries : %d\n", ring->entries);
	udma_info(" to_be_clean : %d\n", ring->to_be_clean);
	udma_info(" to_be_use : %d\n", ring->to_be_use);
	udma_info(" tail : %d\n", ring->tail);
	udma_info(" new_tail : %d\n", ring->new_tail);
	udma_info("\n =========================================================\n");

	if (print_descs) {
		int i;

		udma_info("\n ==================== Ring Entries ====================\n");
		for (i = 0; i < ring->entries; i++) {
			udma_print_desc(INDEX_TO_DESC(i, ring));
		}
		udma_info("\n ======================================================\n");
	}
}
#else
#define UDMA_WARN_ON_RETURN(condition, ret) do {} while (0)
#define udma_print_ring(q, descs) \
	do { \
	} while (0)
#endif /* DEBUG */

static inline u16 __get_using_desc(struct udma_ring *ring)
{
	u16 i = ring->to_be_use, next;
	bool keep_going = true;
	struct udma_desc *cur_desc = NULL, *next_desc = NULL;

	cur_desc = INDEX_TO_DESC(i, ring);
	do {
		rmb();
		next = NEXT_DESC_IDX(i, ring);
		next_desc = INDEX_TO_DESC(next, ring);
		prefetch(next_desc);
		if ((i != ring->tail) && DESC_IS_DONE(cur_desc)) {
			i = next;
			cur_desc = next_desc;
		} else {
			keep_going = false;
		}
	} while (keep_going);

	return i;
}

static bool udma_rx_is_stopped(struct udma_device *umdev)
{
	struct udma_hw *hw = umdev->hw;
	struct udma_desc *udma_tail_desc = NULL;
	struct udma_ring *rx = &umdev->rx.ring;
	u32 desc = 0, status = 0;
	int index;
	bool ret = false;

	if (!(hw->ops->rx_is_active(hw))) {
		status = hw->ops->get_irq_status(hw);
		if (status & UDMA_HW_VALID_RX_STOP_INTR_STATE(hw->port)) {
			ret = true;
		} else {
			desc = hw->ops->get_curr_rx_desc(hw);
			index = DESC_DMA_TO_DESC_INDEX(desc, rx);
			udma_tail_desc = INDEX_TO_DESC(rx->tail, rx);
			ret = ((index == rx->tail) && DESC_IS_DONE(udma_tail_desc));
		}
	}
	return ret;
}

static bool udma_tx_is_stopped(struct udma_device *umdev)
{
	struct
udma_hw *hw = umdev->hw; struct udma_desc *udma_desc = NULL; struct udma_ring *tx = &umdev->tx.ring; u32 desc; int index; bool ret = false; if (!(hw->ops->tx_is_active(hw))) { desc = hw->ops->get_next_tx_desc(hw); if (unlikely(desc == 0)) { ret = true; } else { desc = hw->ops->get_curr_tx_desc(hw); index = DESC_DMA_TO_DESC_INDEX(desc,tx); udma_desc = INDEX_TO_DESC(tx->tail,tx); ret = ((index == tx->tail) && DESC_IS_DONE(udma_desc)); } } return ret; } static void udma_tx_q_pop(struct udma_device *umdev, const unsigned int idx, bool drop) { struct udma_queue *q = &umdev->tx; struct udma_desc *desc = NULL; struct udma_buffer *buffer_info = NULL; u32 data_len = 0; desc = INDEX_TO_DESC(idx,&q->ring); buffer_info = &q->ring.buffer_info[idx]; if (!drop) { data_len = buffer_info->length - DESC_DATA_BUFFER_LEN(desc); q->itr.c_bytes += data_len; q->stats.bytes += data_len; q->stats.pkts++; q->itr.c_packets++; } if (likely(buffer_info->dma)) { dma_unmap_single(&umdev->hw->pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); buffer_info->dma = 0; } if (likely(buffer_info->skb)) { dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = 0; } } static void udma_rx_q_pop(struct udma_device *umdev, const unsigned int idx, bool drop) { struct udma_queue *q = &umdev->rx; struct udma_desc *desc = NULL; struct udma_buffer *buffer_info = NULL; u32 data_size = 0; buffer_info = &q->ring.buffer_info[idx]; desc = INDEX_TO_DESC(idx,&q->ring); data_size = umdev->rx_udma_size - DESC_DATA_BUFFER_LEN(desc); if (likely(buffer_info->dma)) { dma_unmap_single(&umdev->hw->pdev->dev, buffer_info->dma, buffer_info->length, DMA_FROM_DEVICE); buffer_info->dma = 0; } if (!drop && likely(DESC_IS_DONE(desc))) { q->stats.bytes += data_size; q->itr.c_bytes += data_size; q->stats.pkts++; q->itr.c_packets++; /* Remove FCS and transfer to net stack */ data_size -= ETH_FCS_LEN; skb_put(buffer_info->skb, data_size); q->callback(buffer_info->skb, umdev->netdev); buffer_info->skb = NULL; } else if (buffer_info->skb) { dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; q->stats.drops++; } } static void inline udma_irq_enable(struct udma_hw *hw) { hw->ops->enable_tx_irq(hw); hw->ops->enable_rx_irq(hw); } static void inline udma_irq_disable(struct udma_hw *hw) { hw->ops->disable_tx_irq(hw); hw->ops->disable_rx_irq(hw); } /** * udma_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a private device structure **/ static irqreturn_t udma_intr(int irq, void *data) { struct udma_device *umdev = (struct udma_device *)data; struct udma_hw *hw = umdev->hw; struct udma_queue *rx = &umdev->rx; struct udma_queue *tx = &umdev->tx; u32 status = hw->ops->get_irq_status(hw); if (!(UDMA_HW_VALID_INTR_STATE(hw->port) & status)) { return IRQ_NONE; } /* Disable interrupts */ udma_irq_disable(umdev->hw); /* Tx interrupt */ if (status & UDMA_HW_VALID_TX_INTR_STATE(hw->port)) { udma_dbg("Tx udma_intr status 0x%x \n",status); spin_lock(&tx->ring.lock); tx->stats.irqs++; hw->ops->clear_tx_irq(hw); spin_unlock(&tx->ring.lock); } /* Rx interrupt */ if (status & UDMA_HW_VALID_RX_INTR_STATE(hw->port)) { udma_dbg("Rx udma_intr status 0x%x \n",status); spin_lock(&rx->ring.lock); rx->stats.irqs++; hw->ops->clear_rx_irq(hw); spin_unlock(&rx->ring.lock); } /* Start HR timer */ if (!hrtimer_active(&umdev->itr_timer)) { hrtimer_start(&umdev->itr_timer, ns_to_ktime(umdev->itr_cfg.ns), HRTIMER_MODE_REL); } return IRQ_HANDLED; } #ifdef SOFT_IRQ_STATS static void udma_update_q_stats(struct udma_queue *q) { int pkts; if (q->direction == 
UDMA_RX) /* Number of packets received and waiting for releasing */ pkts = IDX_SUB(q->ring.to_be_use, q->ring.to_be_clean, &q->ring); else /* Number of packets waiting for transmission */ pkts = IDX_SUB(q->ring.new_tail, q->ring.to_be_clean, &q->ring); if (pkts > q->stats.max_pkts) q->stats.max_pkts = pkts; q->stats.acc_pkts += pkts; } #endif /* Clean finished descriptors */ static int udma_clean_done_desc(struct udma_device *umdev, struct udma_queue *q, const int budget, bool get_spinlock) { int i, using = 0, cleaned = 0; unsigned long flags = 0; struct udma_ring *ring = &q->ring; if (get_spinlock) spin_lock_irqsave(&ring->lock, flags); ring->to_be_use = __get_using_desc(ring); #ifdef SOFT_IRQ_STATS udma_update_q_stats(q); #endif using = ring->to_be_use; if (get_spinlock) spin_unlock_irqrestore(&ring->lock, flags); i = ring->to_be_clean; while (i != using && cleaned < budget) { q->pop_desc(umdev, i, false); cleaned++; i = NEXT_DESC_IDX(i,ring); } ring->to_be_clean = i; return cleaned; } static int udma_process_tx_queue(struct udma_device *umdev) { unsigned long flags; int done = 0; spin_lock_irqsave(&umdev->tx.ring.lock, flags); /* Clean all Tx finished descriptors */ done = udma_clean_done_desc(umdev, &umdev->tx, UDMA_RING_VALID_MAX_NUM(umdev->tx.ring), false); if (udma_tx_is_stopped(umdev)) { udma_start_tx_transfer(umdev); } spin_unlock_irqrestore(&umdev->tx.ring.lock, flags); return done; } static int udma_process_rx_queue(struct udma_device *umdev, const int budget) { unsigned long flags; int done = 0; /* Clean Rx finished descriptors up to budget */ done = udma_clean_done_desc(umdev, &umdev->rx, budget, true); if (done) udma_allocate_rx_buffers(umdev, done); spin_lock_irqsave(&umdev->rx.ring.lock, flags); if (udma_rx_is_stopped(umdev)) { udma_start_rx_transfer(umdev); } else { udma_continue_rx_transfer(umdev); /* Check stop again to avoid UDMA stopped during the transaction */ if (udma_rx_is_stopped(umdev)) udma_restart_rx_transfer(umdev); } spin_unlock_irqrestore(&umdev->rx.ring.lock, flags); return done; } static void udma_set_fixed_itr(struct udma_itr_config *itr, u32 cps) { if((cps < INTR_FREQ_MIN_FIXED) || (cps > INTR_FREQ_MAX_FIXED)) { udma_err("Interrupt frequency must be in the range from %d to %d.\n", INTR_FREQ_MIN_FIXED, INTR_FREQ_MAX_FIXED); } else { itr->mode = ITR_FIXED; itr->ns = ONE_SEC_TO_NS/cps; } } static inline u32 mbps_to_cps(u32 mbps, u32 ppc) { return ((mbps * 1000000) / (ppc * ITR_AVG_PKT_SIZE * 8)); } static inline u32 cps_to_mbps(u32 cps, u32 ppc) { return (cps * (ppc * ITR_AVG_PKT_SIZE * 8) / 1000000); } static void udma_set_adaptive_itr(struct udma_itr_config *itr, int min_mbps, int max_mbps, int step_mbps, int ppc) { if( min_mbps < ITR_MIN_MBPS ) { udma_err("Minimum data rate for adaptive mode is (%d)\n", ITR_MIN_MBPS); } else if( max_mbps > ITR_MAX_MBPS ) { udma_err("Maximum data rate for adaptive mode is (%d)\n", ITR_MAX_MBPS); } else if ( step_mbps < ITR_MIN_STEP_MBPS ) { udma_err("Minimum step for adaptive mode is (%d)\n", ITR_MIN_STEP_MBPS); } else { itr->mode = ITR_ADAPTIVE; itr->ppc = ppc; itr->min_cps = mbps_to_cps(min_mbps, ppc); itr->max_cps = mbps_to_cps(max_mbps, ppc); itr->step_cps = mbps_to_cps(step_mbps, ppc); } } static void udma_init_adaptive_itr(struct udma_device *umdev) { memset(&umdev->rx.itr, 0, sizeof(struct udma_adapt_itr)); memset(&umdev->tx.itr, 0, sizeof(struct udma_adapt_itr)); umdev->rx.itr.latency = LOW_LATENCY; umdev->tx.itr.latency = LOW_LATENCY; umdev->rx.itr.cps = ITR_LOW_INT_FREQ; umdev->tx.itr.cps = ITR_LOW_INT_FREQ; 
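/* Start from the low-latency interrupt period; udma_update_itr() re-tunes it per queue once traffic statistics accumulate. */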
umdev->itr_cfg.ns = ONE_SEC_TO_NS/ITR_LOW_INT_FREQ; } static void udma_update_itr(struct udma_itr_config *itr_cfg, struct udma_adapt_itr *itr) { u16 new_itr_latency = itr->latency; u16 new_cps = itr->cps; u32 bytes_per_packet = 0; if (itr->c_packets) { bytes_per_packet = itr->c_bytes/itr->c_packets; switch (itr->latency) { case LOW_LATENCY: if ((bytes_per_packet > 1200) && (itr->c_packets > 5)) new_itr_latency = BULK_LATENCY; break; case BULK_LATENCY: if ((itr->c_bytes < 6000) && (itr->c_packets < 5)) new_itr_latency = LOW_LATENCY; break; } if (new_itr_latency == LOW_LATENCY) { /* LOW LATENCY CASE */ new_cps = min(itr->cps + itr_cfg->step_cps, (u32)ITR_LOW_INT_FREQ); } else { /* BULK LATENCY CASE */ if( bytes_per_packet > 1600 ) { /* Jumbo frames detected */ new_cps = itr_cfg->min_cps; } else if (new_itr_latency != itr->latency) { /* Going from low to bulk latency mode */ new_cps = min(itr->cps - itr_cfg->step_cps, itr_cfg->max_cps); } else if (itr->c_packets < itr->lo_ppc) { /* Packets in chain is too low, decrease interrupt frequency */ new_cps = max(itr->cps - itr_cfg->step_cps, itr_cfg->min_cps); } else if (itr->c_packets > itr->hi_ppc) { /* Packets in chain is too high, increase interrupt frequency */ new_cps = min(itr->cps + itr_cfg->step_cps, itr_cfg->max_cps); } if (itr->cps != new_cps) { /* Update low and high packets per chain thresholds */ u32 thres_cps = (itr_cfg->step_cps>>1)+(itr_cfg->step_cps>>2); itr->lo_ppc = ((new_cps-thres_cps)*itr_cfg->ppc)/new_cps; itr->hi_ppc = (((new_cps+thres_cps)*itr_cfg->ppc)/new_cps) + 1; } } itr->cps = new_cps; itr->latency = new_itr_latency; } } static int udma_process_queues(struct napi_struct *napi, int budget) { struct udma_device *umdev = container_of(napi, struct udma_device, napi); int tx_work_done, rx_work_done; #ifdef SOFT_IRQ_STATS umdev->tx.stats.s_irqs++; umdev->rx.stats.s_irqs++; #endif tx_work_done = udma_process_tx_queue(umdev); rx_work_done = udma_process_rx_queue(umdev,budget); if (tx_work_done == UDMA_RING_VALID_MAX_NUM(umdev->tx.ring)) { rx_work_done = budget; } if (rx_work_done < budget) { if (umdev->itr_cfg.mode == ITR_ADAPTIVE) { udma_update_itr(&umdev->itr_cfg, &umdev->tx.itr); udma_update_itr(&umdev->itr_cfg, &umdev->rx.itr); umdev->itr_cfg.ns = ONE_SEC_TO_NS/min(umdev->rx.itr.cps, umdev->tx.itr.cps); } napi_complete(napi); udma_irq_enable(umdev->hw); } return rx_work_done; } static void udma_start_rx_transfer(struct udma_device *umdev) { struct udma_hw *hw = umdev->hw; struct udma_ring *rx = &umdev->rx.ring; struct udma_desc *udma_desc_new_tail; if(rx->tail != rx->new_tail) { rx->to_be_use = NEXT_DESC_IDX(rx->tail, rx); udma_desc_new_tail = &rx->desc[rx->new_tail]; SET_DESC_FLAGS(udma_desc_new_tail, UDMA_TERM_EN); rx->tail = rx->new_tail; hw->ops->start_rx_transfer(hw, DESC_INDEX_TO_DESC_DMA(rx->to_be_use,rx)); } } static void udma_continue_rx_transfer(struct udma_device *umdev) { struct udma_hw *hw = umdev->hw; struct udma_ring *rx = &umdev->rx.ring; struct udma_desc *udma_desc_tail; if (rx->tail != rx->new_tail) { udma_desc_tail = &rx->desc[rx->tail]; CLEAR_DESC_FLAGS(udma_desc_tail, UDMA_TERM_EN); rx->tail = rx->new_tail; udma_desc_tail = &rx->desc[rx->tail]; SET_DESC_FLAGS(udma_desc_tail, UDMA_TERM_EN); /* clear termination in flag register after update tail */ if(hw->ops->rx_is_term(hw)){ hw->ops->clear_rx_term(hw); } } } static void udma_restart_rx_transfer(struct udma_device *umdev) { struct udma_hw *hw = umdev->hw; struct udma_ring *rx = &umdev->rx.ring; struct udma_desc *udma_desc_tail; rx->to_be_use = 
__get_using_desc(rx); udma_desc_tail = &rx->desc[rx->tail]; if (!DESC_IS_DONE(udma_desc_tail)) { hw->ops->start_rx_transfer(hw, DESC_INDEX_TO_DESC_DMA(rx->to_be_use,rx)); } } static void udma_start_tx_transfer(struct udma_device *umdev) { struct udma_hw *hw = umdev->hw; struct udma_ring *tx = &umdev->tx.ring; if (tx->tail != tx->new_tail) { tx->to_be_use = NEXT_DESC_IDX(tx->tail, tx); SET_DESC_FLAGS(&tx->desc[tx->new_tail], UDMA_TERM_EN | UDMA_SRC_INT_EN); tx->tail = tx->new_tail; hw->ops->start_tx_transfer(hw, DESC_INDEX_TO_DESC_DMA(tx->to_be_use,tx)); } } static inline bool is_ring_full(struct udma_ring *ring) { return (((ring->new_tail + 2)%ring->entries) == ring->to_be_clean); } static inline int add_tx_skb(struct udma_device *umdev, struct sk_buff *skb) { struct udma_buffer *buffer_info = NULL; dma_addr_t dma; u32 len = skb_headlen(skb), idx = 0; int ret = UDMA_OK; struct udma_ring *tx = &umdev->tx.ring; struct udma_hw *hw = umdev->hw; if (skb->len < ETH_ZLEN) { WARN_ON(skb_pad(skb,ETH_ZLEN + ETH_FCS_LEN - skb->len)); len = ETH_ZLEN + ETH_FCS_LEN; } else { WARN_ON(skb_pad(skb,ETH_FCS_LEN)); len += ETH_FCS_LEN; } dma = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(&hw->pdev->dev, dma)) { dev_err(&hw->pdev->dev, "DMA map failed\n"); ret = UDMA_ERR; } else { /* Fill in the buffer info to the descriptor */ idx = NEXT_DESC_IDX(tx->new_tail, tx); tx->new_tail = idx; buffer_info = &tx->buffer_info[idx]; buffer_info->skb = skb; buffer_info->dma = dma; buffer_info->length = len; hw->ops->update_tx_desc(hw, &tx->desc[idx],dma,len); } if (udma_tx_is_stopped(umdev)) udma_start_tx_transfer(umdev); return ret; } int udma_send_packet(unsigned char port, struct sk_buff *skb) { struct udma_device *umdev = NULL; struct udma_ring *tx = NULL; int ret = UDMA_OK; unsigned long flags; /* skb check*/ if (unlikely((!skb) || (skb->len <= 0) || (skb->data_len !=0))) return UDMA_INVALID_PARAM; umdev = udma_devs[port]; tx = &umdev->tx.ring; spin_lock_irqsave(&tx->lock, flags); if (is_ring_full(tx)) { /* Release processed buffers in Tx queue */ udma_clean_done_desc(umdev, &umdev->tx, UDMA_RING_VALID_MAX_NUM(umdev->tx.ring), false); ret = UDMA_FULL; umdev->tx.stats.drops++; } else { ret = add_tx_skb(umdev, skb); } spin_unlock_irqrestore(&tx->lock, flags); return ret; } /** * Allocate buffers to UDMA driver to receive packets. * @umdev - UDMA device * @count - Number of buffers to be allocated * * It's required to allocate a single buffer for a packet. * Thus the coming free buffer size should be large enough, * i.e larger than 1536 bytes. Otherwise, the buffer would be refused. * * return UDMA_OK: Buffers were successfully allocated. * return UDMA_ERR: Failure allocating or mapping buffer. 
 */
static int udma_allocate_rx_buffers(struct udma_device *umdev, int count)
{
	struct udma_hw *hw = NULL;
	struct udma_ring *rx = NULL;
	unsigned long flags;
	struct udma_buffer *buffer_info;
	dma_addr_t dma;
	int ret = UDMA_OK;
	struct sk_buff *skb = NULL;
	int i = 0;

	hw = umdev->hw;
	rx = &umdev->rx.ring;

	while (count--) {
		skb = netdev_alloc_skb_ip_align(umdev->netdev,
				umdev->rx_udma_size + umdev->rx_skb_reserve);
		if (unlikely(!skb)) {
			printk("allocate new skb failed in %s function\n", __func__);
			return UDMA_ERR;
		}
		if (umdev->rx_skb_reserve)
			skb_reserve(skb, umdev->rx_skb_reserve);

		dma = dma_map_single(&hw->pdev->dev, skb->data,
				     umdev->rx_udma_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&hw->pdev->dev, dma)) {
			dev_err(&hw->pdev->dev, "DMA map failed\n");
			dev_kfree_skb_any(skb);
			return UDMA_ERR;
		}

		/* Fill in the buffer info to the descriptor */
		spin_lock_irqsave(&rx->lock, flags);
		i = NEXT_DESC_IDX(rx->new_tail, rx);
		rx->new_tail = i;
		buffer_info = &rx->buffer_info[i];
		buffer_info->skb = skb;
		buffer_info->dma = dma;
		buffer_info->length = umdev->rx_udma_size;
		hw->ops->update_rx_desc(hw, &rx->desc[i], dma, umdev->rx_udma_size);
		spin_unlock_irqrestore(&rx->lock, flags);
	}
	return ret;
}

/** udma_register_handler - register the Rx callback
 *
 * @port - udma port number, could be 0 or 1
 * @dev - net_device on whose behalf the port is claimed; it is passed back
 *        to the callback together with each received buffer
 * @rx_handle - Rx callback. Once a buffer is received, the UDMA driver fills
 *              in the buffer descriptor information and calls rx_handle with
 *              the received sk_buff and the registered net_device.
 *              Note that the callback is invoked in softIRQ context.
 *
 * At UDMA driver exit, it will invalidate the buffer descriptors and hand all
 * of the remaining Rx buffers to the upper layer for cleanup through the
 * rx_handle callback.
 *
 * return UDMA_OK, success
 * return UDMA_ERR, failure
 */
int udma_register_handler(unsigned char port, struct net_device *dev,
			  rx_callback_t rx_handle)
{
	struct udma_device *umdev = NULL;
	struct udma_hw *hw = NULL;
	unsigned long flags = 0;
	int err = 0;

	UDMA_WARN_ON_RETURN((port >= UDMA_PORT_NUM_TOTAL), UDMA_INVALID_PARAM);
	UDMA_WARN_ON_RETURN(!rx_handle, UDMA_INVALID_PARAM);

	umdev = udma_devs[port];
	if (umdev == NULL) {
		udma_err("UDMA Driver is not installed\n");
		return UDMA_UNINITIALIZED;
	}

	mutex_lock(&umdev->mutex);
	if (umdev->state != PORT_AVAILABLE) {
		udma_err("UDMA device %d is already being used by others.\n", port);
		err = UDMA_BUSY;
		goto error;
	}
	umdev->netdev = dev;
	hw = umdev->hw;

	hw->ops->hw_init(hw);
	err = udma_setup_resources(umdev);
	if (err) {
		hw->ops->hw_exit(hw);
		goto error;
	}

	err = request_irq(hw->pdev->irq, udma_intr, IRQF_SHARED,
			  hw->port ? UDMA1_NAME : UDMA0_NAME, umdev);
	if (err) {
		udma_err("Error requesting UDMA interrupt for port %d!\n", hw->port);
		udma_free_resources(umdev);
		hw->ops->hw_exit(hw);
		goto error;
	}

	if (umdev->itr_cfg.mode == ITR_ADAPTIVE) {
		udma_init_adaptive_itr(umdev);
	}

	umdev->rx.callback = rx_handle;
	umdev->state = PORT_IN_USE;

	netif_napi_add(dev, &umdev->napi, udma_process_queues, 64);
	napi_enable(&umdev->napi);
	udma_irq_enable(umdev->hw);

	/* Enable UDMA Rx */
	udma_allocate_rx_buffers(umdev, UDMA_RING_VALID_MAX_NUM(umdev->rx.ring));
	spin_lock_irqsave(&umdev->rx.ring.lock, flags);
	udma_start_rx_transfer(umdev);
	spin_unlock_irqrestore(&umdev->rx.ring.lock, flags);

error:
	mutex_unlock(&umdev->mutex);
	return err;
}

/** udma_flush - Stop the UDMA, flush the pending requests in the UDMA port
 *               and release all the remaining buffers.
* * @port - udma port number, could be 0 or 1 * * This function is expected to be called by upper layer when exit * */ void udma_flush(unsigned char port) { struct udma_device *umdev = NULL; struct udma_hw *hw = NULL; int delay; if (WARN_ON(port >= UDMA_PORT_NUM_TOTAL)) { udma_err("%s invalid parameters \n",__FUNCTION__); return; } umdev = udma_devs[port]; hw = umdev->hw; mutex_lock(&umdev->mutex); if (umdev->state != PORT_IN_USE) { udma_err("UDMA device %d is not currently in use!\n", port); goto error; } hrtimer_cancel(&umdev->itr_timer); napi_disable(&umdev->napi); netif_napi_del(&umdev->napi); /* Stop and clear IRQs */ hw->ops->stop_tx_transfer(hw); delay = MAX_UDMA_STOP_DELAY; while (!hw->ops->tx_is_stopped(hw) && delay--) { mdelay(1); } hw->ops->clear_tx_irq(hw); hw->ops->stop_rx_transfer(hw); delay = MAX_UDMA_STOP_DELAY; while (!hw->ops->rx_is_stopped(hw) && delay--) { mdelay(1); } hw->ops->clear_rx_irq(hw); udma_irq_disable(umdev->hw); hw->ops->hw_exit(hw); free_irq(hw->pdev->irq, umdev); udma_free_resources(umdev); umdev->netdev = NULL; umdev->state = PORT_AVAILABLE; error: mutex_unlock(&umdev->mutex); } EXPORT_SYMBOL_GPL(udma_send_packet); EXPORT_SYMBOL_GPL(udma_register_handler); EXPORT_SYMBOL_GPL(udma_flush); enum hrtimer_restart itr_timeout(struct hrtimer *timer) { struct udma_device *umdev; umdev = container_of(timer, struct udma_device, itr_timer); /* Schedule NAPI softIRQ to start polling */ if (napi_schedule_prep(&umdev->napi)) { umdev->tx.itr.c_bytes = 0; umdev->tx.itr.c_packets = 0; umdev->rx.itr.c_bytes = 0; umdev->rx.itr.c_packets = 0; __napi_schedule(&umdev->napi); } return HRTIMER_NORESTART; } /**************************************************************************** * The UDMA SW stack setup/initialization routine ****************************************************************************/ static int udma_alloc_ring (struct udma_ring *ring, struct udma_hw *hw, void (*init_desc)(struct udma_hw *, struct udma_desc *)) { struct udma_desc *desc = NULL; int i = 0; spin_lock_init(&ring->lock); ring->buffer_info = vzalloc(sizeof(struct udma_buffer) * ring->entries); if (!ring->buffer_info) { udma_err("Cannot allocate memory for buffer_info\n"); return -ENOMEM; } ring->dma_size = sizeof(struct udma_desc) * ring->entries; ring->desc = dma_alloc_coherent(&hw->pdev->dev, ring->dma_size, &ring->dma, GFP_KERNEL); if (!ring->desc) { vfree(ring->buffer_info); return -ENOMEM; } desc = ring->desc; /* Link the descriptors one by one */ while (i < ring->entries) { memset(&desc[i], 0, sizeof(struct udma_desc)); desc[i].next_desc = DESC_INDEX_TO_DESC_DMA((i+1)%ring->entries, ring); (*init_desc)(hw, &desc[i]); i++; } return 0; } static void udma_free_ring_dma(struct udma_ring *ring, struct pci_dev *pdev) { if (ring->buffer_info) { vfree(ring->buffer_info); ring->buffer_info = 0; } if (ring->dma) { dma_free_coherent(&pdev->dev, ring->dma_size, ring->desc, ring->dma); ring->dma = 0; ring->dma_size = 0; } } static int udma_setup_resources(struct udma_device *umdev) { int err; err = udma_alloc_ring(&umdev->rx.ring, umdev->hw, umdev->hw->ops->init_rx_desc); if (err != 0) { udma_err("Error allocating Rx ring for port %d.\n", umdev->hw->port); } else if ((err = udma_alloc_ring(&umdev->tx.ring, umdev->hw, umdev->hw->ops->init_tx_desc)) != 0) { udma_err("Error allocating Tx ring for port %d.\n", umdev->hw->port); udma_free_ring_dma(&umdev->rx.ring, umdev->hw->pdev); } else { memset(&umdev->rx.stats, 0, sizeof(struct udma_stats)); memset(&umdev->tx.stats, 0, sizeof(struct udma_stats)); 
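/* Hook the per-direction descriptor release helpers used by udma_clean_done_desc() and udma_free_q(). */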
umdev->rx.pop_desc = udma_rx_q_pop; umdev->tx.pop_desc = udma_tx_q_pop; } return err; } static void udma_free_q(struct udma_device *umdev, struct udma_queue *q) { u32 i = 0, entries = q->ring.entries, dir = q->direction; struct udma_ring *ring = &q->ring; /* Clean all descriptors in the ring */ for (i = 0; i < entries; i++) q->pop_desc(umdev, i, true); memset(ring->buffer_info, 0, (sizeof(struct udma_buffer) * ring->entries)); memset(ring->desc, 0, ring->dma_size); udma_free_ring_dma(ring, umdev->hw->pdev); memset(q, 0, sizeof(struct udma_queue)); q->direction = dir; q->ring.entries = entries; q->ring.ent_mask = entries -1; } static void udma_free_resources(struct udma_device *umdev) { udma_free_q(umdev, &umdev->tx); udma_free_q(umdev, &umdev->rx); } static int udma_dev_init(struct udma_device *umdev) { hrtimer_init(&umdev->itr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); umdev->itr_timer.function = itr_timeout; mutex_init(&umdev->mutex); memset(&umdev->rx, 0, sizeof(struct udma_queue)); memset(&umdev->tx, 0, sizeof(struct udma_queue)); umdev->rx.ring.entries = UDMA_RING_DEFAULT_ENTRIES; umdev->tx.ring.entries = UDMA_RING_DEFAULT_ENTRIES; umdev->rx.ring.ent_mask = UDMA_RING_DEFAULT_ENTRIES - 1; umdev->tx.ring.ent_mask = UDMA_RING_DEFAULT_ENTRIES - 1; umdev->rx.direction = UDMA_RX; umdev->tx.direction = UDMA_TX; umdev->rx_udma_size = UDMA_MIN_RX_SIZE; umdev->rx_skb_reserve = 0; umdev->netdev = NULL; umdev->state = PORT_AVAILABLE; udma_set_fixed_itr(&umdev->itr_cfg, INTR_FREQ_MIN_FIXED); return 0; } struct udma_hw *udma_alloc_hw(size_t size) { struct udma_device *umdev = NULL; struct udma_hw *hw = NULL; hw = kzalloc(sizeof(struct udma_device) + size, GFP_KERNEL); if (NULL == hw) { udma_err("Cannot allocate memory\n"); return ERR_PTR(-ENOMEM); } umdev = (struct udma_device *)udma_hw_priv(hw); umdev->hw = hw; return hw; } EXPORT_SYMBOL_GPL(udma_alloc_hw); #ifdef DEBUG /* For debug purpose */ static int udma_open(struct inode *inode, struct file *filp) { return 0; } static int udma_close(struct inode *inode, struct file *filp) { return 0; } static long udma_unlocked_ioctl(struct file *filp, unsigned int arg, unsigned long cmd) { struct udma_device *umdev = NULL; struct udma_hw *hw = NULL; int port = arg - UDMA_PORT_MAGIC_BASE; if (port >= UDMA_PORT_NUM_TOTAL) return -EIO; umdev = udma_devs[port]; hw = umdev->hw; switch (cmd) { case UDMA_DUMP_TX_CURR_RING: udma_print_ring(&umdev->tx, true); break; case UDMA_DUMP_RX_CURR_RING: udma_print_ring(&umdev->rx, true); break; case UDMA_DUMP_STATS: case UDMA_DUMP_ITR_INFO: //TODO Implement these debug use cases. 
break; case UDMA_DUMP_CURR_TX_REGS: udma_regs_dump(hw,UDMA_TX); break; case UDMA_DUMP_CURR_RX_REGS: udma_regs_dump(hw,UDMA_RX); break; default: udma_err("UDMA driver receive Wrong IOCTL command = 0x%lx\n",cmd); return -EFAULT; } return 0; } static struct file_operations udma_fops = { .owner = THIS_MODULE, .unlocked_ioctl = udma_unlocked_ioctl, .open = udma_open, .release = udma_close, }; #endif #define PRINT_QUEUE_STATS(dir) \ " [" #dir "] Queue Size : (%u)\n" \ " [" #dir "] IRQs : (%u)\n" \ " [" #dir "] Packets : (%u)\n" \ " [" #dir "] Total bytes : (%u)\n" \ " [" #dir "] Total drops : (%u)\n", \ umdev->dir.ring.entries, umdev->dir.stats.irqs, umdev->dir.stats.pkts, \ umdev->dir.stats.bytes, umdev->dir.stats.drops #define PRINT_S_IRQ_STATS(dir) \ " [" #dir "] softIRQs : (%u)\n" \ " [" #dir "] Max pkts : (%u)\n" \ " [" #dir "] Acc pkts : (%u)\n", \ umdev->dir.stats.s_irqs, umdev->dir.stats.max_pkts, \ umdev->dir.stats.acc_pkts int proc_udma_status(struct seq_file *sf, void *v) { struct udma_device *umdev; int idx; for (idx = 0; idx < UDMA_PORT_NUM_TOTAL; idx++) { umdev = udma_devs[idx]; seq_printf(sf, "\nudma port(%d):\n", idx); seq_printf(sf, PRINT_QUEUE_STATS(tx)); #ifdef SOFT_IRQ_STATS seq_printf(sf, PRINT_S_IRQ_STATS(tx)); #endif seq_printf(sf, "\n [rx] Buffer Size : (%u)\n", umdev->rx_udma_size + umdev->rx_skb_reserve); seq_printf(sf, " [rx] Buffer Reserved : (%u)\n", umdev->rx_skb_reserve); seq_printf(sf, PRINT_QUEUE_STATS(rx)); #ifdef SOFT_IRQ_STATS seq_printf(sf, PRINT_S_IRQ_STATS(rx)); #endif } return 0; } int proc_gap_burst_status(struct seq_file *sf, void *v) { struct udma_device *umdev; int idx; for (idx = 0; idx < UDMA_PORT_NUM_TOTAL; idx++) { umdev = udma_devs[idx]; seq_printf(sf, "udma port(%d):\n", idx); seq_printf(sf, " [udma --> l2] gap value: %d cycles, burst size: %d bytes\n", gap_values[umdev->hw->mode.udma_to_l2_gap], burst_sizes[umdev->hw->mode.udma_to_l2_burst]); seq_printf(sf, " [l2 --> udma] gap value: %d cycles, burst size: %d bytes\n", gap_values[umdev->hw->mode.l2_to_udma_gap], burst_sizes[umdev->hw->mode.l2_to_udma_burst]); } return 0; } int proc_itr_status(struct seq_file *sf, void *v) { struct udma_device *umdev; int idx; for (idx = 0; idx < UDMA_PORT_NUM_TOTAL; idx++) { umdev = udma_devs[idx]; seq_printf(sf, "udma port(%d):\n", idx); seq_printf(sf, " mode: %d, ns: %d, ppc: %d\n", umdev->itr_cfg.mode, umdev->itr_cfg.ns, umdev->itr_cfg.ppc); seq_printf(sf, " min_cps: %d, max_cps: %d, step_cps: %d\n", umdev->itr_cfg.min_cps, umdev->itr_cfg.max_cps, umdev->itr_cfg.step_cps); seq_printf(sf, " min_mbps: %d, max_mbps: %d, step_mbps: %d\n", cps_to_mbps(umdev->itr_cfg.min_cps, umdev->itr_cfg.ppc), cps_to_mbps(umdev->itr_cfg.max_cps, umdev->itr_cfg.ppc), cps_to_mbps(umdev->itr_cfg.step_cps, umdev->itr_cfg.ppc)); seq_printf(sf, " tx.latency: %d, tx.cps: %d\n" " rx.latency: %d, rx.cps: %d\n", umdev->tx.itr.latency, umdev->tx.itr.cps, umdev->rx.itr.latency, umdev->rx.itr.cps); } return 0; } static void udma_parse_args(char *str, u8 *argc, char **argv, int max_arg) { char *ch; bool in_token = false; for (ch = str; *ch != '\0' && *argc < max_arg; ch++) { if (*ch == '\t' || *ch == ' ') { *ch = '\0'; in_token = false; } else if (!isdigit(*ch)) { printk(KERN_DEBUG "invalid parameter format!\n"); } else if (!in_token) { argv[*argc] = ch; (*argc)++; in_token = true; } } } static int udma_burst_index(int burst_value) { int i = 0, array_size = sizeof(burst_sizes) / sizeof(u32); while (i < array_size && burst_sizes[i] != burst_value) i++; return ((i == array_size)?(-1):i); } 
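/*
 * Illustrative shell usage of the gap/burst tuning interface; a sketch that
 * mirrors the five arguments parsed by proc_gap_burst_control() below. The
 * values must match entries of gap_values[] and burst_sizes[], and the port
 * must not be in use:
 *
 *   # cat /proc/udma/gap_burst_status
 *   # echo "0 16 32 16 32" > /proc/udma/gap_burst_control
 *
 * i.e. port 0, udma->l2 gap of 16 clocks and burst of 32 bytes, l2->udma gap
 * of 16 clocks and burst of 32 bytes.
 */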
static int udma_gap_index(int gap_value)
{
	int i = 0, array_size = sizeof(gap_values) / sizeof(u32);

	while (i < array_size && gap_values[i] != gap_value)
		i++;
	return ((i == array_size) ? (-1) : i);
}

int udma_set_burstsize(uint8_t port,
		       udma_burstsz_t udma_to_l2_bst,
		       udma_burstsz_t l2_to_udma_bst)
{
	struct udma_device *umdev = NULL;
	int ret = UDMA_ERR;

	if ((port < 0) || (port > 1)) {
		udma_err("Port number must be 0 or 1.\n");
	} else if ((umdev = udma_devs[port]) == NULL) {
		udma_err("UDMA Driver is not installed\n");
	} else if ((udma_to_l2_bst < UDMA_BURST_SZ_4_BYTES) ||
		   (udma_to_l2_bst > UDMA_BURST_SZ_128_BYTES) ||
		   (l2_to_udma_bst < UDMA_BURST_SZ_4_BYTES) ||
		   (l2_to_udma_bst > UDMA_BURST_SZ_128_BYTES)) {
		udma_err("Burst size must be 4, 8, 16, 32, 64, 128 bytes.\n");
	} else {
		mutex_lock(&umdev->mutex);
		if (umdev->state != PORT_AVAILABLE) {
			udma_err("UDMA port(%d) is in use.\n", port);
		} else {
			umdev->hw->mode.udma_to_l2_burst = udma_to_l2_bst;
			umdev->hw->mode.l2_to_udma_burst = l2_to_udma_bst;
			ret = UDMA_OK;
		}
		mutex_unlock(&umdev->mutex);
	}
	return ret;
}

int udma_set_gapval(uint8_t port,
		    udma_gapval_t udma_to_l2_gap,
		    udma_gapval_t l2_to_udma_gap)
{
	struct udma_device *umdev = NULL;
	int ret = UDMA_ERR;

	if ((port < 0) || (port > 1)) {
		udma_err("Port number must be 0 or 1.\n");
	} else if ((umdev = udma_devs[port]) == NULL) {
		udma_err("UDMA Driver is not installed\n");
	} else if ((udma_to_l2_gap < UDMA_GAP_VAL_0_CLKS) ||
		   (udma_to_l2_gap > UDMA_GAP_VAL_8192_CLKS) ||
		   (l2_to_udma_gap < UDMA_GAP_VAL_0_CLKS) ||
		   (l2_to_udma_gap > UDMA_GAP_VAL_8192_CLKS)) {
		udma_err("Gap value must be 0, 16, 64, 256, 1024, 2048, "
			 "4096 or 8192 clocks:\n");
	} else {
		mutex_lock(&umdev->mutex);
		if (umdev->state != PORT_AVAILABLE) {
			udma_err("UDMA port(%d) is in use.\n", port);
		} else {
			umdev->hw->mode.udma_to_l2_gap = udma_to_l2_gap;
			umdev->hw->mode.l2_to_udma_gap = l2_to_udma_gap;
			ret = UDMA_OK;
		}
		mutex_unlock(&umdev->mutex);
	}
	return ret;
}

int proc_gap_burst_control(struct file *file, const char __user *buffer,
			   size_t count, loff_t *ppos)
{
	char *str;
	int port = -1, udma_to_l2_gap = -1, l2_to_udma_gap = -1;
	int udma_to_l2_bst = -1, l2_to_udma_bst = -1;
	u8 argc = 0;
	char *argv[5];

	/* One extra byte so the user input is always NUL terminated for parsing */
	str = kmalloc(count + 1, GFP_KERNEL);
	if (!str)
		return -ENOMEM;
	if (copy_from_user(str, buffer, count)) {
		kfree(str);
		return -EFAULT;
	}
	str[count] = '\0';

	udma_parse_args(str, &argc, argv, 5);
	if (argc == 5) {
		port = simple_strtol(argv[0], NULL, 10);
		udma_to_l2_gap = udma_gap_index((int)simple_strtol(argv[1], NULL, 10));
		udma_to_l2_bst = udma_burst_index((int)simple_strtol(argv[2], NULL, 10));
		l2_to_udma_gap = udma_gap_index((int)simple_strtol(argv[3], NULL, 10));
		l2_to_udma_bst = udma_burst_index((int)simple_strtol(argv[4], NULL, 10));
		udma_set_burstsize(port, udma_to_l2_bst, l2_to_udma_bst);
		udma_set_gapval(port, udma_to_l2_gap, l2_to_udma_gap);
	}
	kfree(str);
	return count;
}

int proc_itr_control(struct file *file, const char __user *buffer,
		     size_t count, loff_t *ppos)
{
	char *str;
	int port = -1, itr_mode = -1;
	int itr_p1 = ITR_DEFAULT_MIN_MBPS, itr_p2 = ITR_DEFAULT_MAX_MBPS;
	int itr_p3 = ITR_DEFAULT_STEP_MBPS, itr_p4 = ITR_DEFAULT_PPC;
	u8 argc = 0;
	char *argv[6];
	struct udma_device *umdev = NULL;

	/* One extra byte so the user input is always NUL terminated for parsing */
	str = kmalloc(count + 1, GFP_KERNEL);
	if (!str)
		return -ENOMEM;
	if (copy_from_user(str, buffer, count)) {
		kfree(str);
		return -EFAULT;
	}
	str[count] = '\0';

	udma_parse_args(str, &argc, argv, 6);
	if (argc > 1) {
		port = simple_strtol(argv[0], NULL, 10);
		itr_mode = simple_strtol(argv[1], NULL, 10);
		if (argc > 2)
			itr_p1 = simple_strtol(argv[2], NULL, 10);
		if (argc > 3)
			itr_p2 = simple_strtol(argv[3], NULL, 10);
		if (argc > 4)
			itr_p3 = simple_strtol(argv[4], NULL, 10);
		if (argc > 5)
			itr_p4 = simple_strtol(argv[5], NULL, 10);
	}

	if ((port < 0) || (port > 1))
		udma_err("Port number must be 0 or 1.\n");
	else
		umdev = udma_devs[port];

	if (umdev == NULL) {
		udma_err("UDMA Driver is not available for port(%d).\n", port);
	} else if ((itr_mode < 0) || (itr_mode > ITR_DISABLED)) {
		udma_err("ITR mode must be 0 (adaptive), 1 (fixed) or 2 (disabled).\n");
	} else {
		mutex_lock(&umdev->mutex);
		if (umdev->state != PORT_AVAILABLE) {
			udma_err("UDMA port(%d) is in use.\n", port);
		} else if (itr_mode == ITR_FIXED) {
			udma_set_fixed_itr(&umdev->itr_cfg, itr_p1);
		} else if (itr_mode == ITR_ADAPTIVE) {
			udma_set_adaptive_itr(&umdev->itr_cfg, itr_p1, itr_p2, itr_p3, itr_p4);
		} else {
			umdev->itr_cfg.ns = 1;
		}
		mutex_unlock(&umdev->mutex);
	}
	kfree(str);
	return count;
}

#define VALID_ENTRIES(ent) \
	(ent >= UDMA_RING_MIN_ENTRIES) && (ent <= UDMA_RING_MAX_ENTRIES) && \
	!(ent & (ent - 1))

int proc_queue_control(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char *str;
	int port = -1, rx_entries = -1, tx_entries = -1;
	u8 argc = 0;
	char *argv[3];
	struct udma_device *umdev = NULL;

	/* One extra byte so the user input is always NUL terminated for parsing */
	str = kmalloc(count + 1, GFP_KERNEL);
	if (!str)
		return -ENOMEM;
	if (copy_from_user(str, buffer, count)) {
		kfree(str);
		return -EFAULT;
	}
	str[count] = '\0';

	udma_parse_args(str, &argc, argv, 3);
	if (argc > 1) {
		port = simple_strtol(argv[0], NULL, 10);
		rx_entries = simple_strtol(argv[1], NULL, 10);
		if (argc > 2)
			tx_entries = simple_strtol(argv[2], NULL, 10);
	}

	if ((port < 0) || (port > 1))
		udma_err("Port number must be 0 or 1.\n");
	else
		umdev = udma_devs[port];

	if (umdev == NULL) {
		udma_err("UDMA Driver is not available for port(%d).\n", port);
	} else {
		mutex_lock(&umdev->mutex);
		if (umdev->state != PORT_AVAILABLE) {
			udma_err("UDMA port(%d) is in use.\n", port);
		} else {
			if (VALID_ENTRIES(rx_entries)) {
				umdev->rx.ring.entries = rx_entries;
				umdev->rx.ring.ent_mask = rx_entries - 1;
			} else {
				udma_err("Rx elements must be a power of 2 between %d and %d\n",
					 UDMA_RING_MIN_ENTRIES, UDMA_RING_MAX_ENTRIES);
			}
			if (VALID_ENTRIES(tx_entries)) {
				umdev->tx.ring.entries = tx_entries;
				umdev->tx.ring.ent_mask = tx_entries - 1;
			} else {
				udma_err("Tx elements must be a power of 2 between %d and %d\n",
					 UDMA_RING_MIN_ENTRIES, UDMA_RING_MAX_ENTRIES);
			}
		}
		mutex_unlock(&umdev->mutex);
	}
	kfree(str);
	return count;
}

int proc_rx_buf_control(struct file *file, const char __user *buffer,
			size_t count, loff_t *ppos)
{
	char *str;
	int port = -1, rx_size = -1, rx_skb_res = 0;
	u8 argc = 0;
	char *argv[3];
	struct udma_device *umdev = NULL;

	/* One extra byte so the user input is always NUL terminated for parsing */
	str = kmalloc(count + 1, GFP_KERNEL);
	if (!str)
		return -ENOMEM;
	if (copy_from_user(str, buffer, count)) {
		kfree(str);
		return -EFAULT;
	}
	str[count] = '\0';

	udma_parse_args(str, &argc, argv, 3);
	if (argc > 1) {
		port = simple_strtoul(argv[0], NULL, 10);
		rx_size = simple_strtoul(argv[1], NULL, 10);
		if (argc > 2)
			rx_skb_res = simple_strtoul(argv[2], NULL, 10);
	}

	if ((port < 0) || (port > 1))
		udma_err("Port number must be 0 or 1.\n");
	else
		umdev = udma_devs[port];

	if (umdev == NULL) {
		udma_err("UDMA Driver is not available for port(%d).\n", port);
	} else {
		mutex_lock(&umdev->mutex);
		if (umdev->state != PORT_AVAILABLE) {
			udma_err("UDMA port(%d) is in use.\n", port);
		} else if (rx_size < UDMA_MIN_RX_SIZE || rx_size > UDMA_MAX_RX_SIZE) {
			udma_err("Rx buffer size must be between %d and %d\n",
				 UDMA_MIN_RX_SIZE, UDMA_MAX_RX_SIZE);
		} else if (rx_skb_res < 0 || rx_skb_res > (rx_size - UDMA_MIN_RX_SIZE)) {
			udma_err("Max space to be reserved for requested buffer size is %d\n",
				 rx_size - UDMA_MIN_RX_SIZE);
		} else {
			umdev->rx_udma_size = rx_size -
rx_skb_res; umdev->rx_skb_reserve = rx_skb_res; } mutex_unlock(&umdev->mutex); } kfree(str); return count; } int proc_udma_help(struct seq_file *sf, void *v) { seq_printf(sf, "UDMA Statistics and Configuration:\n"); seq_printf(sf, "- To show the ports statistics:\n" " # cat /proc/udma/status\n\n"); seq_printf(sf, "- To show the current gap and burst values:\n" " # cat /proc/udma/gap_burst_status\n\n"); seq_printf(sf, "- To set new gap and burst values:\n" " # echo \" " "\" > /proc/udma/gap_burst_control\n"); seq_printf(sf, " Notes:\n" " * The PORT number must be 0 or 1.\n" " * The GAP must be 0, 16, 64, 256, 1024, 2048, 4096 or 8192\n" " idle clock cycles between bursts.\n" " * The BURST must be 4, 8, 16, 32, 64, 128\n" " bytes per DMA bus transfer.\n\n"); seq_printf(sf, "- To set the number of elements in the Rx and Tx queues:\n" " # echo \" \" > " "/proc/udma/queue_control\n"); seq_printf(sf, " Notes:\n" " * The PORT number must be 0 or 1.\n" " * The ELEMENTS values must be a power of two.\n\n"); seq_printf(sf, "- To set Rx buffers configuration:\n" " # echo \" \" > " "/proc/udma/rx_buf_control\n"); seq_printf(sf, " Notes:\n" " * The PORT number must be 0 or 1.\n" " * The RX_BUF_SIZE is the total size, in bytes, of the buffers\n" " allocated by the UDMA driver to receive data.\n" " * The RX_BUF_RESERVE is optional and represents the number of bytes\n" " to be reserved in the Rx buffers for the UDMA client (i.e. for a\n" " header required by a WiFi driver).\n\n"); seq_printf(sf, "The UDMA configuration can be changed when the port is not in use\n" "(e.g. when the interfaces using the port to be changed are all down).\n\n"); /* TODO Add help to set the ITR configuration "[PORT] [ITR_MODE] [MIN_MBPS] [MAX_MBPS] [STEP_MBPS] [PPC]" */ return 0; } int proc_udma_status_open(struct inode *inode, struct file *file) { return single_open(file, proc_udma_status, PDE_DATA(inode)); } struct file_operations proc_udma_status_fops = { .owner = THIS_MODULE, .open = proc_udma_status_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; int proc_gap_burst_status_open(struct inode *inode, struct file *file) { return single_open(file, proc_gap_burst_status, PDE_DATA(inode)); } struct file_operations proc_gap_burst_status_fops = { .owner = THIS_MODULE, .open = proc_gap_burst_status_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; int proc_itr_status_open(struct inode *inode, struct file *file) { return single_open(file, proc_itr_status, PDE_DATA(inode)); } struct file_operations proc_itr_status_fops = { .owner = THIS_MODULE, .open = proc_itr_status_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; int proc_udma_help_open(struct inode *inode, struct file *file) { return single_open(file, proc_udma_help, PDE_DATA(inode)); } struct file_operations proc_udma_help_fops = { .owner = THIS_MODULE, .open = proc_udma_help_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; struct file_operations proc_gap_burst_control_fops = { .owner = THIS_MODULE, .read = seq_read, .llseek = seq_lseek, .write = proc_gap_burst_control, }; struct file_operations proc_itr_control_fops = { .owner = THIS_MODULE, .read = seq_read, .llseek = seq_lseek, .write = proc_itr_control, }; struct file_operations proc_queue_control_fops = { .owner = THIS_MODULE, .read = seq_read, .llseek = seq_lseek, .write = proc_queue_control, }; struct file_operations proc_rx_buf_control_fops = { .owner = THIS_MODULE, .read = seq_read, .llseek = seq_lseek, .write = 
proc_rx_buf_control, }; int create_proc_filesystem(void) { struct proc_dir_entry *dir; int err = -1; dir = proc_mkdir("udma", NULL); if (!dir) { udma_err("proc mkdir error\n"); } else if (proc_create("status", 0, dir, &proc_udma_status_fops) == NULL) { udma_err("proc create entry error\n"); } else if (proc_create("gap_burst_status", 0, dir, &proc_gap_burst_status_fops) == NULL) { udma_err("proc create entry error\n"); } else if (proc_create("itr_status", 0, dir, &proc_itr_status_fops) == NULL) { udma_err("proc create entry error\n"); } else if (proc_create("help", 0, dir, &proc_udma_help_fops) == NULL) { udma_err("proc create entry error\n"); } else if (proc_create("gap_burst_control", 0, dir, &proc_gap_burst_control_fops) == NULL) { udma_err("proc create entry error\n"); } else if (proc_create("itr_control", 0, dir, &proc_itr_control_fops) == NULL) { udma_err("proc create entry error\n"); } else if (proc_create("queue_control", 0, dir, &proc_queue_control_fops) == NULL) { udma_err("proc create entry error\n"); } else if (proc_create("rx_buf_control", 0, dir, &proc_rx_buf_control_fops) == NULL) { udma_err("proc create entry error\n"); } else { err = 0; } return err; } int udma_setup_sw(void *dev) { struct udma_device *umdev = (struct udma_device *)dev; int err; static int create_proc_flag = 0; if (WARN_ON(NULL == umdev)) return -EINVAL; err = udma_dev_init(umdev); if (0 != err) { udma_err("UDMA SW layer setup failure\n"); return -ENODEV; } udma_devs[umdev->hw->port] = umdev; /* create proc filesystem utilities for UDMA tuning */ if (!create_proc_flag) { err = create_proc_filesystem(); create_proc_flag = 1; } #ifdef DEBUG if (!umdev->hw->port) { if (proc_create(UDMA_PROC_FS, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH, NULL, &udma_fops) == NULL) { udma_err("Cannot create proc entry!\n"); return -EIO; } } #endif return err; } /* udma_unregister - Called by hw layer as removal routine */ void udma_free_sw(void *dev) { struct udma_device *umdev = (struct udma_device *)dev; if (WARN_ON(umdev == NULL)) return ; hrtimer_cancel(&umdev->itr_timer); udma_devs[umdev->hw->port] = NULL; #ifdef DEBUG if (!umdev->hw->port) remove_proc_entry(UDMA_PROC_FS, NULL); #endif } EXPORT_SYMBOL_GPL(udma_setup_sw); EXPORT_SYMBOL_GPL(udma_free_sw); /* udma_main.c */
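/*
 * Illustrative client usage of the exported API; a sketch only. The exact
 * rx_callback_t signature comes from udma_main.h; it is assumed here to take
 * the received sk_buff plus the net_device supplied at registration, matching
 * the invocation in udma_rx_q_pop(). "my_rx_handler" and "my_netdev" are
 * hypothetical names.
 *
 *   static int my_rx_handler(struct sk_buff *skb, struct net_device *dev)
 *   {
 *           netif_rx(skb);           // hand the received frame to the stack
 *           return 0;
 *   }
 *
 *   // Claim port 0, allocate rings/buffers and start Rx DMA:
 *   err = udma_register_handler(0, my_netdev, my_rx_handler);
 *
 *   // Transmit a linear skb (skb->data_len must be 0):
 *   if (udma_send_packet(0, skb) == UDMA_FULL)
 *           ; // ring full, drop or retry later
 *
 *   // Stop DMA, free all remaining buffers and release the port:
 *   udma_flush(0);
 */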