/* ========================================================================= * The Synopsys DWC ETHER QOS Software Driver and documentation (hereinafter * "Software") is an unsupported proprietary work of Synopsys, Inc. unless * otherwise expressly agreed to in writing between Synopsys and you. * * The Software IS NOT an item of Licensed Software or Licensed Product under * any End User Software License Agreement or Agreement for Licensed Product * with Synopsys or any supplement thereto. Permission is hereby granted, * free of charge, to any person obtaining a copy of this software annotated * with this license and the Software, to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject * to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * ========================================================================= */ /*!@file: DWC_ETH_QOS_desc.c * @brief: Driver functions. 
*/ #include "DWC_ETH_QOS_yheader.h" #include "DWC_ETH_QOS_desc.h" #include "DWC_ETH_QOS_yregacc.h" #ifdef GBE_DEBUG extern uint mss_for_tso; #endif /*! * \brief API to free the transmit descriptor memory. * * \details This function is used to free the transmit descriptor memory. * * \param[in] pdata - pointer to private data structure. * * \retval void. */ static void DWC_ETH_QOS_tx_desc_free_mem(struct DWC_ETH_QOS_prv_data *pdata, uint32_t tx_qCnt) { struct DWC_ETH_QOS_tx_wrapper_descriptor *desc_data = NULL; uint32_t qInx; DBGPR("-->DWC_ETH_QOS_tx_desc_free_mem: tx_qCnt = %d\n", tx_qCnt); for (qInx = 0; qInx < tx_qCnt; qInx++) { desc_data = GET_TX_WRAPPER_DESC(qInx); if (GET_TX_DESC_PTR(qInx, 0)) { dma_free_coherent(&pdata->pdev->dev, (sizeof(tx_descriptor_t) * TX_DESC_CNT), GET_TX_DESC_PTR(qInx, 0), GET_TX_DESC_DMA_ADDR(qInx, 0)); GET_TX_DESC_PTR(qInx, 0) = NULL; } } DBGPR("<--DWC_ETH_QOS_tx_desc_free_mem\n"); } /*! * \brief API to free the receive descriptor memory. * * \details This function is used to free the receive descriptor memory. * * \param[in] pdata - pointer to private data structure. * * \retval void. */ static void DWC_ETH_QOS_rx_desc_free_mem(struct DWC_ETH_QOS_prv_data *pdata, uint32_t rx_qCnt) { struct DWC_ETH_QOS_rx_wrapper_descriptor *desc_data = NULL; uint32_t qInx = 0; DBGPR("-->DWC_ETH_QOS_rx_desc_free_mem: rx_qCnt = %d\n", rx_qCnt); for (qInx = 0; qInx < rx_qCnt; qInx++) { desc_data = GET_RX_WRAPPER_DESC(qInx); if (GET_RX_DESC_PTR(qInx, 0)) { dma_free_coherent(&pdata->pdev->dev, (sizeof(rx_descriptor_t) * RX_DESC_CNT), GET_RX_DESC_PTR(qInx, 0), GET_RX_DESC_DMA_ADDR(qInx, 0)); GET_RX_DESC_PTR(qInx, 0) = NULL; } } DBGPR("<--DWC_ETH_QOS_rx_desc_free_mem\n"); } /*! * \brief API to alloc the queue memory. * * \details This function allocates the queue structure memory. * * \param[in] pdata - pointer to private data structure. * * \return integer * * \retval 0 on success & -ve number on failure. 
*/ static int DWC_ETH_QOS_alloc_queue_struct(struct DWC_ETH_QOS_prv_data *pdata) { int ret = 0; DBGPR("-->DWC_ETH_QOS_alloc_queue_struct: tx_queue_cnt = %d,"\ "rx_queue_cnt = %d\n", pdata->tx_queue_cnt, pdata->rx_queue_cnt); pdata->tx_queue = kzalloc(sizeof(struct DWC_ETH_QOS_tx_queue) * pdata->tx_queue_cnt, GFP_KERNEL); if (pdata->tx_queue == NULL) { printk(KERN_ALERT "ERROR: Unable to allocate Tx queue structure\n"); ret = -ENOMEM; goto err_out_tx_q_alloc_failed; } pdata->rx_queue = kzalloc(sizeof(struct DWC_ETH_QOS_rx_queue) * pdata->rx_queue_cnt, GFP_KERNEL); if (pdata->rx_queue == NULL) { printk(KERN_ALERT "ERROR: Unable to allocate Rx queue structure\n"); ret = -ENOMEM; goto err_out_rx_q_alloc_failed; } DBGPR("<--DWC_ETH_QOS_alloc_queue_struct\n"); return ret; err_out_rx_q_alloc_failed: kfree(pdata->tx_queue); err_out_tx_q_alloc_failed: return ret; } /*! * \brief API to free the queue memory. * * \details This function free the queue structure memory. * * \param[in] pdata - pointer to private data structure. * * \return void */ static void DWC_ETH_QOS_free_queue_struct(struct DWC_ETH_QOS_prv_data *pdata) { DBGPR("-->DWC_ETH_QOS_free_queue_struct\n"); if (pdata->tx_queue != NULL) { kfree(pdata->tx_queue); pdata->tx_queue = NULL; } if (pdata->rx_queue != NULL) { kfree(pdata->rx_queue); pdata->rx_queue = NULL; } DBGPR("<--DWC_ETH_QOS_free_queue_struct\n"); } /*! * \brief API to allocate the memory for descriptor & buffers. * * \details This function is used to allocate the memory for device * descriptors & buffers * which are used by device for data transmission & reception. * * \param[in] pdata - pointer to private data structure. * * \return integer * * \retval 0 on success & -ENOMEM number on failure. 
*/
static int allocate_buffer_and_desc(struct DWC_ETH_QOS_prv_data *pdata)
{
	int ret = 0;
	uint32_t qInx, size = 0;

	DBGPR("-->allocate_buffer_and_desc: TX_QUEUE_CNT = %d, "\
		"RX_QUEUE_CNT = %d\n", DWC_ETH_QOS_TX_QUEUE_CNT,
		DWC_ETH_QOS_RX_QUEUE_CNT);

	/* Each descriptor ring is rounded up to a whole page. */
	size = ALIGN(sizeof(tx_descriptor_t) * TX_DESC_CNT, PAGE_SIZE);
	/* Allocate descriptors and buffers memory for all TX queues */
	for (qInx = 0; qInx < DWC_ETH_QOS_TX_QUEUE_CNT; qInx++) {
		/* TX descriptors */
		GET_TX_DESC_PTR(qInx, 0) =
			dma_alloc_coherent(&pdata->pdev->dev, size,
				&(GET_TX_DESC_DMA_ADDR(qInx, 0)), GFP_KERNEL);
		if (GET_TX_DESC_PTR(qInx, 0) == NULL) {
			ret = -ENOMEM;
			goto err_out_tx_desc;
		}
	}

	size = sizeof(struct DWC_ETH_QOS_tx_buffer) * TX_DESC_CNT;
	for (qInx = 0; qInx < DWC_ETH_QOS_TX_QUEUE_CNT; qInx++) {
		/* TX wrapper buffer (driver-side bookkeeping, not DMA) */
		GET_TX_BUF_PTR(qInx, 0) = kzalloc(size, GFP_KERNEL);
		if (GET_TX_BUF_PTR(qInx, 0) == NULL) {
			ret = -ENOMEM;
			goto err_out_tx_buf;
		}
	}

	size = ALIGN(sizeof(rx_descriptor_t) * RX_DESC_CNT, PAGE_SIZE);
	/* Allocate descriptors and buffers memory for all RX queues */
	for (qInx = 0; qInx < DWC_ETH_QOS_RX_QUEUE_CNT; qInx++) {
		/* RX descriptors */
		GET_RX_DESC_PTR(qInx, 0) =
			dma_alloc_coherent(&pdata->pdev->dev, size,
				&(GET_RX_DESC_DMA_ADDR(qInx, 0)), GFP_KERNEL);
		if (GET_RX_DESC_PTR(qInx, 0) == NULL) {
			ret = -ENOMEM;
			goto rx_alloc_failure;
		}
	}

	size = sizeof(struct DWC_ETH_QOS_rx_buffer) * RX_DESC_CNT;
	for (qInx = 0; qInx < DWC_ETH_QOS_RX_QUEUE_CNT; qInx++) {
		/* RX wrapper buffer (driver-side bookkeeping, not DMA) */
		GET_RX_BUF_PTR(qInx, 0) = kzalloc(size, GFP_KERNEL);
		if (GET_RX_BUF_PTR(qInx, 0) == NULL) {
			ret = -ENOMEM;
			goto err_out_rx_buf;
		}
	}

	DBGPR("<--allocate_buffer_and_desc\n");

	return ret;

	/* Error unwinding: stages are released in reverse allocation order
	 * by FALLING THROUGH the labels below.  On entry to each label,
	 * qInx is the index of the allocation that failed, so the *_free_mem
	 * helper frees queues 0..qInx-1 of the partially-allocated stage;
	 * qInx is then widened to the full queue count so the subsequent
	 * labels free their (fully allocated) stages completely. */
err_out_rx_buf:
	DWC_ETH_QOS_rx_buf_free_mem(pdata, qInx);
	qInx = DWC_ETH_QOS_RX_QUEUE_CNT;

rx_alloc_failure:
	DWC_ETH_QOS_rx_desc_free_mem(pdata, qInx);
	qInx = DWC_ETH_QOS_TX_QUEUE_CNT;

err_out_tx_buf:
	DWC_ETH_QOS_tx_buf_free_mem(pdata, qInx);
	qInx = DWC_ETH_QOS_TX_QUEUE_CNT;

err_out_tx_desc:
	DWC_ETH_QOS_tx_desc_free_mem(pdata, qInx);

	return ret;
}

/*!
 * \brief API to initialize the transmit descriptors.
 *
 * \details This function is used to initialize transmit descriptors.
 * Each descriptors are assigned a buffer. The base/starting address
 * of the descriptors is updated in device register if required & all
 * the private data structure variables related to transmit
 * descriptor handling are updated in this function.
 *
 * \param[in] pdata - pointer to private data structure.
 * \param[in] qInx - TX queue index being initialized.
 *
 * \return void.
 */
static void DWC_ETH_QOS_wrapper_tx_descriptor_init_single_q(
			struct DWC_ETH_QOS_prv_data *pdata,
			uint32_t qInx)
{
	int i;
	struct DWC_ETH_QOS_tx_wrapper_descriptor *desc_data =
		GET_TX_WRAPPER_DESC(qInx);
	struct DWC_ETH_QOS_tx_buffer *buffer = GET_TX_BUF_PTR(qInx, 0);
	tx_descriptor_t *desc = GET_TX_DESC_PTR(qInx, 0);
	dma_addr_t desc_dma = GET_TX_DESC_DMA_ADDR(qInx, 0);
	hw_interface_t *hw_if = &(pdata->hw_if);

	DBGPR("-->DWC_ETH_QOS_wrapper_tx_descriptor_init_single_q: "\
		"qInx = %u\n", qInx);

	/* Fan out per-descriptor pointers/DMA addresses from the ring base
	 * (slot 0) so later code can address any slot directly. */
	for (i = 0; i < TX_DESC_CNT; i++) {
		GET_TX_DESC_PTR(qInx, i) = &desc[i];
		GET_TX_DESC_DMA_ADDR(qInx, i) =
			(desc_dma + sizeof(tx_descriptor_t) * i);
		GET_TX_BUF_PTR(qInx, i) = &buffer[i];
	}

	/* Reset the software ring state: empty ring, all slots free. */
	desc_data->cur_tx = 0;
	desc_data->dirty_tx = 0;
	desc_data->queue_stopped = 0;
	desc_data->tx_pkt_queued = 0;
	desc_data->packet_count = 0;
	desc_data->free_desc_cnt = TX_DESC_CNT;

	/* Program the hardware TX descriptor registers for this queue. */
	hw_if->tx_desc_init(pdata, qInx);

	desc_data->cur_tx = 0;

	DBGPR("<--DWC_ETH_QOS_wrapper_tx_descriptor_init_single_q\n");
}

/*!
 * \brief API to initialize the receive descriptors.
 *
 * \details This function is used to initialize receive descriptors.
 * skb buffer is allocated & assigned for each descriptors. The base/starting
 * address of the descriptors is updated in device register if required and
 * all the private data structure variables related to receive descriptor
 * handling are updated in this function.
 *
 * \param[in] pdata - pointer to private data structure.
 *
 * \return void.
*/
static void DWC_ETH_QOS_wrapper_rx_descriptor_init_single_q(
			struct DWC_ETH_QOS_prv_data *pdata,
			uint32_t qInx)
{
	int i;
	struct DWC_ETH_QOS_rx_wrapper_descriptor *desc_data =
		GET_RX_WRAPPER_DESC(qInx);
	struct DWC_ETH_QOS_rx_buffer *buffer = GET_RX_BUF_PTR(qInx, 0);
	rx_descriptor_t *desc = GET_RX_DESC_PTR(qInx, 0);
	dma_addr_t desc_dma = GET_RX_DESC_DMA_ADDR(qInx, 0);
	hw_interface_t *hw_if = &(pdata->hw_if);

	DBGPR("-->DWC_ETH_QOS_wrapper_rx_descriptor_init_single_q: "\
		"qInx = %u\n", qInx);

	/* Clear the whole wrapper-buffer array before handing slots out. */
	memset(buffer, 0, (sizeof(struct DWC_ETH_QOS_rx_buffer) * RX_DESC_CNT));

	for (i = 0; i < RX_DESC_CNT; i++) {
		/* Fan out per-descriptor pointers/DMA addresses from the
		 * ring base (slot 0). */
		GET_RX_DESC_PTR(qInx, i) = &desc[i];
		GET_RX_DESC_DMA_ADDR(qInx, i) =
			(desc_dma + sizeof(rx_descriptor_t) * i);
		GET_RX_BUF_PTR(qInx, i) = &buffer[i];

		/* allocate skb & assign to each desc; on failure stop here —
		 * the remaining slots simply stay unpopulated.
		 * NOTE(review): GFP_ATOMIC is chosen when the NETIP power-up
		 * flag is set — presumably this path can run in atomic
		 * context then; confirm against callers. */
		if (pdata->alloc_rx_buf(pdata, GET_RX_BUF_PTR(qInx, i),
			(pdata->power_state & DWC_ETH_QOS_NETIP_PWRUP)?
			GFP_ATOMIC : GFP_KERNEL))
			break;

		/* Ensure buffer setup is visible before the descriptor is
		 * (later) handed to the hardware. */
		wmb();
	}

	/* Reset the software ring state. */
	desc_data->cur_rx = 0;
	desc_data->dirty_rx = 0;
	desc_data->skb_realloc_idx = 0;
	desc_data->skb_realloc_threshold = MIN_RX_DESC_CNT;
	desc_data->pkt_received = 0;
	desc_data->cur_rx = 0;	/* redundant: already cleared above */

	/* Program the hardware RX descriptor registers for this queue. */
	hw_if->rx_desc_init(pdata, qInx);

	DBGPR("<--DWC_ETH_QOS_wrapper_rx_descriptor_init_single_q\n");
}

/*!
 * \brief Initialize the TX descriptor rings of all TX queues.
 *
 * \param[in] pdata - pointer to private data structure.
 *
 * \return void.
 */
static void DWC_ETH_QOS_wrapper_tx_descriptor_init(struct DWC_ETH_QOS_prv_data *pdata)
{
	uint32_t qInx;

	DBGPR("-->DWC_ETH_QOS_wrapper_tx_descriptor_init\n");

	for (qInx = 0; qInx < DWC_ETH_QOS_TX_QUEUE_CNT; qInx++) {
		DWC_ETH_QOS_wrapper_tx_descriptor_init_single_q(pdata, qInx);
	}

	DBGPR("<--DWC_ETH_QOS_wrapper_tx_descriptor_init\n");
}

/*!
 * \brief Initialize the RX descriptor rings of all RX queues and link each
 * RX queue back to the private data structure.
 *
 * \param[in] pdata - pointer to private data structure.
 *
 * \return void.
 */
static void DWC_ETH_QOS_wrapper_rx_descriptor_init(struct DWC_ETH_QOS_prv_data *pdata)
{
	struct DWC_ETH_QOS_rx_queue *rx_queue = NULL;
	uint32_t qInx;

	DBGPR("-->DWC_ETH_QOS_wrapper_rx_descriptor_init\n");

	for (qInx = 0; qInx < DWC_ETH_QOS_RX_QUEUE_CNT; qInx++) {
		rx_queue = GET_RX_QUEUE_PTR(qInx);
		/* back-pointer used by per-queue handlers (e.g. NAPI) */
		rx_queue->pdata = pdata;

		DWC_ETH_QOS_wrapper_rx_descriptor_init_single_q(pdata, qInx);
	}

	DBGPR("<--DWC_ETH_QOS_wrapper_rx_descriptor_init\n");
}

/*!
 * \brief API to free the receive descriptor & buffer memory.
 *
 * \details This function is used to free the receive descriptor & buffer
 * memory: rings, skbs, and wrapper buffers of all RX queues.
 *
 * \param[in] pdata - pointer to private data structure.
 *
 * \retval void.
 */
static void DWC_ETH_QOS_rx_free_mem(struct DWC_ETH_QOS_prv_data *pdata)
{
	DBGPR("-->DWC_ETH_QOS_rx_free_mem\n");

	/* free RX descriptor */
	DWC_ETH_QOS_rx_desc_free_mem(pdata, DWC_ETH_QOS_RX_QUEUE_CNT);

	/* free RX skb's */
	DWC_ETH_QOS_rx_skb_free_mem(pdata, DWC_ETH_QOS_RX_QUEUE_CNT);

	/* free RX wrapper buffer */
	DWC_ETH_QOS_rx_buf_free_mem(pdata, DWC_ETH_QOS_RX_QUEUE_CNT);

	DBGPR("<--DWC_ETH_QOS_rx_free_mem\n");
}

/*!
 * \brief API to free the transmit descriptor & buffer memory.
 *
 * \details This function is used to free the transmit descriptor
 * & buffer memory of all TX queues.
 *
 * \param[in] pdata - pointer to private data structure.
 *
 * \retval void.
 */
static void DWC_ETH_QOS_tx_free_mem(struct DWC_ETH_QOS_prv_data *pdata)
{
	DBGPR("-->DWC_ETH_QOS_tx_free_mem\n");

	/* free TX descriptor */
	DWC_ETH_QOS_tx_desc_free_mem(pdata, DWC_ETH_QOS_TX_QUEUE_CNT);

	/* free TX buffer */
	DWC_ETH_QOS_tx_buf_free_mem(pdata, DWC_ETH_QOS_TX_QUEUE_CNT);

	DBGPR("<--DWC_ETH_QOS_tx_free_mem\n");
}

/*!
 * \details This function is invoked by other function to free
 * the tx socket buffers (unmaps DMA and releases the skb of every
 * slot in one TX queue).
 *
 * \param[in] pdata – pointer to private data structure.
 * \param[in] qInx – TX queue index.
 *
 * \return void
 */
static void DWC_ETH_QOS_tx_skb_free_mem_single_q(struct DWC_ETH_QOS_prv_data *pdata,
						 uint32_t qInx)
{
	uint32_t i;
	struct DWC_ETH_QOS_tx_buffer *buffer = NULL;

	DBGPR("-->DWC_ETH_QOS_tx_skb_free_mem_single_q: qInx = %u\n", qInx);

	for (i = 0; i < TX_DESC_CNT; i++) {
		buffer = GET_TX_BUF_PTR(qInx, i);
		if (buffer)
			DWC_ETH_QOS_unmap_tx_skb(pdata, buffer);
	}

	DBGPR("<--DWC_ETH_QOS_tx_skb_free_mem_single_q\n");
}

/*!
 * \brief API to free the transmit descriptor skb memory.
 *
 * \details This function is used to free the transmit descriptor skb memory.
* * \param[in] pdata - pointer to private data structure. * * \retval void. */ static void DWC_ETH_QOS_tx_skb_free_mem(struct DWC_ETH_QOS_prv_data *pdata, uint32_t tx_qCnt) { uint32_t qInx; DBGPR("-->DWC_ETH_QOS_tx_skb_free_mem: tx_qCnt = %d\n", tx_qCnt); for (qInx = 0; qInx < tx_qCnt; qInx++) DWC_ETH_QOS_tx_skb_free_mem_single_q(pdata, qInx); DBGPR("<--DWC_ETH_QOS_tx_skb_free_mem\n"); } /*! * \details This function is invoked by other function to free * the rx socket buffers. * * \param[in] pdata – pointer to private data structure. * * \return void */ static void DWC_ETH_QOS_rx_skb_free_mem_single_q(struct DWC_ETH_QOS_prv_data *pdata, uint32_t qInx) { struct DWC_ETH_QOS_rx_wrapper_descriptor *desc_data = GET_RX_WRAPPER_DESC(qInx); struct DWC_ETH_QOS_rx_buffer *buffer = NULL; uint32_t i; DBGPR("-->DWC_ETH_QOS_rx_skb_free_mem_single_q: qInx = %u\n", qInx); for (i = 0; i < RX_DESC_CNT; i++) { buffer = GET_RX_BUF_PTR(qInx, i); if (buffer) { DWC_ETH_QOS_unmap_rx_skb(pdata, buffer); } } /* there are also some cached data from a chained rx */ if (desc_data->skb_top) dev_kfree_skb_any(desc_data->skb_top); desc_data->skb_top = NULL; DBGPR("<--DWC_ETH_QOS_rx_skb_free_mem_single_q\n"); } /*! * \brief API to free the receive descriptor skb memory. * * \details This function is used to free the receive descriptor skb memory. * * \param[in] pdata - pointer to private data structure. * * \retval void. */ static void DWC_ETH_QOS_rx_skb_free_mem(struct DWC_ETH_QOS_prv_data *pdata, uint32_t rx_qCnt) { uint32_t qInx; DBGPR("-->DWC_ETH_QOS_rx_skb_free_mem: rx_qCnt = %d\n", rx_qCnt); for (qInx = 0; qInx < rx_qCnt; qInx++) DWC_ETH_QOS_rx_skb_free_mem_single_q(pdata, qInx); DBGPR("<--DWC_ETH_QOS_rx_skb_free_mem\n"); } /*! * \brief API to free the transmit descriptor wrapper buffer memory. * * \details This function is used to free the transmit descriptor wrapper buffer memory. * * \param[in] pdata - pointer to private data structure. * * \retval void. 
*/
static void DWC_ETH_QOS_tx_buf_free_mem(struct DWC_ETH_QOS_prv_data *pdata,
					uint32_t tx_qCnt)
{
	uint32_t qInx;

	DBGPR("-->DWC_ETH_QOS_tx_buf_free_mem: tx_qCnt = %d\n", tx_qCnt);

	for (qInx = 0; qInx < tx_qCnt; qInx++) {
		/* free TX buffer; kfree(NULL) is a no-op so no guard is
		 * needed, and NULLing the pointer keeps a repeated free
		 * harmless. */
		kfree(GET_TX_BUF_PTR(qInx, 0));
		GET_TX_BUF_PTR(qInx, 0) = NULL;
	}

	DBGPR("<--DWC_ETH_QOS_tx_buf_free_mem\n");
}

/*!
 * \brief API to free the receive descriptor wrapper buffer memory.
 *
 * \details This function is used to free the receive descriptor wrapper
 * buffer memory.
 *
 * \param[in] pdata - pointer to private data structure.
 *
 * \retval void.
 */
static void DWC_ETH_QOS_rx_buf_free_mem(struct DWC_ETH_QOS_prv_data *pdata,
					uint32_t rx_qCnt)
{
	uint32_t qInx;

	DBGPR("-->DWC_ETH_QOS_rx_buf_free_mem: rx_qCnt = %d\n", rx_qCnt);

	for (qInx = 0; qInx < rx_qCnt; qInx++) {
		/* free RX wrapper buffer; kfree(NULL) is a no-op */
		kfree(GET_RX_BUF_PTR(qInx, 0));
		GET_RX_BUF_PTR(qInx, 0) = NULL;
	}

	DBGPR("<--DWC_ETH_QOS_rx_buf_free_mem\n");
}

/*!
 * \brief api to handle tso
 *
 * \details This function is invoked by start_xmit functions. This function
 * will get all the tso details like MSS(Maximum Segment Size), packet header
 * length, packet pay load length and tcp header length etc if the given skb
 * has tso packet and store it in other wrapper tx structure for later usage.
 *
 * \param[in] dev – pointer to net device structure.
 * \param[in] skb – pointer to socket buffer structure.
* * \return integer * * \retval 1 on success, -ve no failure and 0 if not tso pkt * */ static int DWC_ETH_QOS_handle_tso(struct net_device *dev, struct sk_buff *skb) { struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev); tx_pkt_features_t *tx_pkt_features = &pdata->tx_pkt_features; int ret = 1; DBGPR("-->DWC_ETH_QOS_handle_tso\n"); if (skb_is_gso(skb) == 0) { DBGPR("This is not a TSO/LSO/GSO packet\n"); return 0; } DBGPR("Got TSO packet\n"); if (skb_header_cloned(skb)) { ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (ret) return ret; } /* get TSO details */ #ifdef GBE_DEBUG if (mss_for_tso) tx_pkt_features->mss = mss_for_tso; else #endif tx_pkt_features->mss = skb_shinfo(skb)->gso_size; tx_pkt_features->hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); tx_pkt_features->pay_len = (skb->len - tx_pkt_features->hdr_len); tx_pkt_features->tcp_hdr_len = tcp_hdrlen(skb); DBGPR("mss = %lu\n", tx_pkt_features->mss); DBGPR("hdr_len = %lu\n", tx_pkt_features->hdr_len); DBGPR("pay_len = %lu\n", tx_pkt_features->pay_len); DBGPR("tcp_hdr_len = %lu\n", tx_pkt_features->tcp_hdr_len); DBGPR("<--DWC_ETH_QOS_handle_tso\n"); return ret; } /* returns 0 on success and -ve on failure */ static int DWC_ETH_QOS_map_non_page_buffs(struct DWC_ETH_QOS_prv_data *pdata, struct DWC_ETH_QOS_tx_buffer *buffer, struct DWC_ETH_QOS_tx_buffer *prev_buffer, struct sk_buff *skb, unsigned int offset, unsigned int size) { uint32_t len = 0; DBGPR("-->DWC_ETH_QOS_map_non_page_buffs\n"); /* Fill the second pointer in prev_buffer first, if available */ if (prev_buffer && !prev_buffer->dma2) { len = MIN(size, DWC_ETH_QOS_MAX_DATA_PER_TX_BUF); prev_buffer->dma2 = dma_map_single((&pdata->pdev->dev), (skb->data + offset), len, DMA_TO_DEVICE); if (dma_mapping_error((&pdata->pdev->dev), prev_buffer->dma2)) { printk(KERN_ALERT "failed to do the dma map\n"); return - ENOMEM; } prev_buffer->len2 = len; prev_buffer->buf2_mapped_as_page = Y_FALSE; size -= len; offset += len; } /* Fill the first pointer in 
buffer */ if (size) { len = MIN(size, DWC_ETH_QOS_MAX_DATA_PER_TX_BUF); buffer->dma = dma_map_single((&pdata->pdev->dev), (skb->data + offset), len, DMA_TO_DEVICE); if (dma_mapping_error((&pdata->pdev->dev), buffer->dma)) { printk(KERN_ALERT "failed to do the dma map\n"); return - ENOMEM; } buffer->len = len; buffer->buf1_mapped_as_page = Y_FALSE; size -= len; offset += len; } /* Fill the second pointer in buffer, if required */ if (size) { BUG_ON(size > DWC_ETH_QOS_MAX_DATA_PER_TX_BUF); buffer->dma2 = dma_map_single((&pdata->pdev->dev), (skb->data + offset), size, DMA_TO_DEVICE); if (dma_mapping_error((&pdata->pdev->dev), buffer->dma2)) { printk(KERN_ALERT "failed to do the dma map\n"); return - ENOMEM; } buffer->len2 = size; buffer->buf2_mapped_as_page = Y_FALSE; } DBGPR("<--DWC_ETH_QOS_map_non_page_buffs\n"); return 0; } /* returns 0 on success and -ve on failure */ static int DWC_ETH_QOS_map_page_buffs(struct DWC_ETH_QOS_prv_data *pdata, struct DWC_ETH_QOS_tx_buffer *buffer, struct DWC_ETH_QOS_tx_buffer *prev_buffer, struct skb_frag_struct *frag, unsigned int offset, unsigned int size) { uint32_t len = 0; DBGPR("-->DWC_ETH_QOS_map_page_buffs\n"); /* Fill the second buffer pointer in prev_buffer first, if available */ if (prev_buffer && !prev_buffer->dma2) { len = MIN(size, DWC_ETH_QOS_MAX_DATA_PER_TX_BUF); prev_buffer->dma2 = dma_map_page((&pdata->pdev->dev), frag->page.p, frag->page_offset + offset, len, DMA_TO_DEVICE); if (dma_mapping_error((&pdata->pdev->dev), prev_buffer->dma2)) { printk(KERN_ALERT "failed to do the dma map\n"); return -ENOMEM; } prev_buffer->len2 = len; prev_buffer->buf2_mapped_as_page = Y_TRUE; size -= len; offset += len; } /* Fill the first pointer in buffer */ if (size) { len = MIN(size, DWC_ETH_QOS_MAX_DATA_PER_TX_BUF); buffer->dma = dma_map_page((&pdata->pdev->dev), frag->page.p, frag->page_offset + offset, len, DMA_TO_DEVICE); if (dma_mapping_error((&pdata->pdev->dev), buffer->dma)) { printk(KERN_ALERT "failed to do the dma map\n"); 
return -ENOMEM; } buffer->len = len; buffer->buf1_mapped_as_page = Y_TRUE; size -= len; offset += len; } /* Fill the second pointer in buffer, if required */ if (size) { BUG_ON(size > DWC_ETH_QOS_MAX_DATA_PER_TX_BUF); buffer->dma2 = dma_map_page((&pdata->pdev->dev), frag->page.p, frag->page_offset + offset, size, DMA_TO_DEVICE); if (dma_mapping_error((&pdata->pdev->dev), buffer->dma2)) { printk(KERN_ALERT "failed to do the dma map\n"); return - ENOMEM; } buffer->len2 = size; buffer->buf2_mapped_as_page = Y_TRUE; } DBGPR("<--DWC_ETH_QOS_map_page_buffs\n"); return 0; } /*! * \details This function is invoked by start_xmit functions. This function * will get the dma/physical address of the packet to be transmitted and * its length. All this information about the packet to be transmitted is * stored in private data structure and same is used later in the driver to * setup the descriptor for transmission. * * \param[in] dev – pointer to net device structure. * \param[in] skb – pointer to socket buffer structure. * * \return unsigned int * * \retval count – number of packet to be programmed in the descriptor or * zero on failure. 
*/
static unsigned int DWC_ETH_QOS_map_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
	uint32_t qInx = skb_get_queue_mapping(skb);
	struct DWC_ETH_QOS_tx_wrapper_descriptor *desc_data =
		GET_TX_WRAPPER_DESC(qInx);
	struct DWC_ETH_QOS_tx_buffer *buffer = NULL;
	struct DWC_ETH_QOS_tx_buffer *prev_buffer = NULL;
	tx_pkt_features_t *tx_pkt_features = &pdata->tx_pkt_features;
	uint32_t varvlan_pkt;
	int index = (int)desc_data->cur_tx;	/* ring slot being filled */
	unsigned int frag_cnt = skb_shinfo(skb)->nr_frags;
	unsigned int hdr_len = 0;
	unsigned int i;
	/* count = data descriptors used; xtra_count = context descriptors
	 * (VLAN/DVLAN/TSO-MSS) reserved but not mapped here */
	unsigned int count = 0, xtra_count = 0, offset = 0, size;
	int len;
	int vartso_enable = 0;
	int ret;

	DBGPR("-->DWC_ETH_QOS_map_skb: cur_tx = %d, qInx = %u\n",
		desc_data->cur_tx, qInx);

#ifdef DWC_ETH_QOS_ENABLE_VLAN_TAG
	/* Reserve a ring slot for the VLAN context descriptor, which is
	 * programmed elsewhere; only the index is advanced here. */
	varvlan_pkt = VAR32_GET_BIT(tx_pkt_features->pkt_attributes,
			TX_PKT_FEATURES_ATTR_VLAN_PKT);
	if (varvlan_pkt == 0x1) {
		DBGPR("Skipped preparing index %d "\
			"(VLAN Context descriptor)\n\n", index);
		INCR_TX_DESC_INDEX(index, 1);
		xtra_count++;
	}
#endif
#ifdef DWC_ETH_QOS_ENABLE_DVLAN
	/* Same for the double-VLAN context descriptor. */
	if (pdata->via_reg_or_desc) {
		DBGPR("Skipped preparing index %d "\
			"(Double VLAN Context descriptor)\n\n", index);
		INCR_TX_DESC_INDEX(index, 1);
		xtra_count++;
	}
#endif /* End of DWC_ETH_QOS_ENABLE_DVLAN */

	vartso_enable = VAR32_GET_BIT(tx_pkt_features->pkt_attributes,
			TX_PKT_FEATURES_ATTR_TSO_ENABLE);
	if (vartso_enable) {
		if (desc_data->default_mss != tx_pkt_features->mss) {
			/* keep space for CONTEXT descriptor in the RING */
			INCR_TX_DESC_INDEX(index, 1);
			xtra_count++;
		}
		/* For TSO the headers are mapped first, separately from
		 * the payload. */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		len = hdr_len;
	} else {
		/* Non-TSO: map the whole linear part in one pass. */
		len = (skb->len - skb->data_len);
	}

	/* Map the linear (header) region, DWC_ETH_QOS_MAX_DATA_PER_TXD
	 * bytes per descriptor.  A slot is only consumed (count++) when its
	 * first pointer got filled; otherwise the chunk landed in
	 * prev_buffer's second pointer. */
	while (len) {
		size = MIN(len, DWC_ETH_QOS_MAX_DATA_PER_TXD);
		buffer = GET_TX_BUF_PTR(qInx, index);
		ret = DWC_ETH_QOS_map_non_page_buffs(pdata, buffer,
				prev_buffer, skb, offset, size);
		if (ret < 0)
			goto err_out_dma_map_fail;

		len -= size;
		offset += size;
		if (buffer->dma != 0) {
			prev_buffer = buffer;
			INCR_TX_DESC_INDEX(index, 1);
			count++;
		}
	}

	/* Process remaining pay load in skb->data in case of TSO packet */
	if (vartso_enable) {
		len = ((skb->len - skb->data_len) - hdr_len);
		while (len > 0) {
			size = MIN(len, DWC_ETH_QOS_MAX_DATA_PER_TXD);
			buffer = GET_TX_BUF_PTR(qInx, index);
			ret = DWC_ETH_QOS_map_non_page_buffs(pdata, buffer,
					prev_buffer, skb, offset, size);
			if (ret < 0)
				goto err_out_dma_map_fail;

			len -= size;
			offset += size;
			if (buffer->dma != 0) {
				prev_buffer = buffer;
				INCR_TX_DESC_INDEX(index, 1);
				count++;
			}
		}
	}

	/* Process fragmented skb's */
	for (i = 0; i < frag_cnt; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = frag->size;
		offset = 0;
		while (len) {
			size = MIN(len, DWC_ETH_QOS_MAX_DATA_PER_TXD);
			buffer = GET_TX_BUF_PTR(qInx, index);
			ret = DWC_ETH_QOS_map_page_buffs(pdata, buffer,
					prev_buffer, frag, offset, size);
			if (ret < 0)
				goto err_out_dma_map_fail;

			len -= size;
			offset += size;
			if (buffer->dma != 0) {
				prev_buffer = buffer;
				INCR_TX_DESC_INDEX(index, 1);
				count++;
			}
		}
	}

	desc_data->packet_count = count;

	/* Attach the skb to the last slot that actually holds data so the
	 * completion path frees it exactly once. */
	if (!count) {
		xtra_count = 0;
	} else if (buffer->dma) {
		buffer->skb = skb;
	} else if (prev_buffer) {
		prev_buffer->skb = skb;
	} else {
		goto err_out_dma_map_fail;
	}

	DBGPR("<--DWC_ETH_QOS_map_skb\n");

	return (count + xtra_count);

err_out_dma_map_fail:
	printk(KERN_ALERT "Tx DMA map failed\n");

	/* Walk back over every slot consumed so far and undo its mappings. */
	for (; count > 0; count--) {
		DECR_TX_DESC_INDEX(index);
		buffer = GET_TX_BUF_PTR(qInx, index);
		DWC_ETH_QOS_unmap_tx_skb(pdata, buffer);
	}

	return 0;
}

/*!
 * \brief API to release the skb.
 *
 * \details This function is called in *_tx_interrupt function to release
 * the skb for the successfully transmited packets.
 *
 * \param[in] pdata - pointer to private data structure.
* \param[in] buffer - pointer to *_tx_buffer structure * * \return void */ static void DWC_ETH_QOS_unmap_tx_skb(struct DWC_ETH_QOS_prv_data *pdata, struct DWC_ETH_QOS_tx_buffer *buffer) { DBGPR("-->DWC_ETH_QOS_unmap_tx_skb\n"); if (buffer->dma) { if (buffer->buf1_mapped_as_page == Y_TRUE) dma_unmap_page((&pdata->pdev->dev), buffer->dma, buffer->len, DMA_TO_DEVICE); else dma_unmap_single((&pdata->pdev->dev), buffer->dma, buffer->len, DMA_TO_DEVICE); buffer->dma = 0; buffer->len = 0; } if (buffer->dma2) { if (buffer->buf2_mapped_as_page == Y_TRUE) dma_unmap_page((&pdata->pdev->dev), buffer->dma2, buffer->len2, DMA_TO_DEVICE); else dma_unmap_single((&pdata->pdev->dev), buffer->dma2, buffer->len2, DMA_TO_DEVICE); buffer->dma2 = 0; buffer->len2 = 0; } if (buffer->skb != NULL) { dev_kfree_skb_any(buffer->skb); buffer->skb = NULL; } DBGPR("<--DWC_ETH_QOS_unmap_tx_skb\n"); } /*! * \details This function is invoked by other function for releasing the socket * buffer which are received by device and passed to upper layer. * * \param[in] pdata – pointer to private device structure. * \param[in] buffer – pointer to rx wrapper buffer structure. 
* * \return void */ static void DWC_ETH_QOS_unmap_rx_skb(struct DWC_ETH_QOS_prv_data *pdata, struct DWC_ETH_QOS_rx_buffer *buffer) { DBGPR("-->DWC_ETH_QOS_unmap_rx_skb\n"); /* Unmap the first buffer */ if (buffer->dma) { if (pdata->rx_split_hdr) { dma_unmap_single(&pdata->pdev->dev, buffer->dma, (2*buffer->rx_hdr_size), DMA_FROM_DEVICE); } else if (pdata->dev->mtu > DWC_ETH_QOS_ETH_FRAME_LEN) { dma_unmap_page(&pdata->pdev->dev, buffer->dma, PAGE_SIZE, DMA_FROM_DEVICE); } else { dma_unmap_single(&pdata->pdev->dev, buffer->dma, pdata->rx_buffer_len, DMA_FROM_DEVICE); } buffer->dma = 0; } /* unmap the second buffer */ if (buffer->dma2) { dma_unmap_page(&pdata->pdev->dev, buffer->dma2, PAGE_SIZE, DMA_FROM_DEVICE); buffer->dma2 = 0; } /* page1 will be present only if JUMBO is enabled */ if (buffer->page) { put_page(buffer->page); buffer->page = NULL; } /* page2 will be present if JUMBO/SPLIT HDR is enabled */ if (buffer->page2) { put_page(buffer->page2); buffer->page2 = NULL; } if (buffer->skb) { dev_kfree_skb_any(buffer->skb); buffer->skb = NULL; } DBGPR("<--DWC_ETH_QOS_unmap_rx_skb\n"); } /*! * \brief API to re-allocate the new skb to rx descriptors. * * \details This function is used to re-allocate & re-assign the new skb to * receive descriptors from which driver has read the data. Also ownership bit * and other bits are reset so that device can reuse the descriptors. * * \param[in] pdata - pointer to private data structure. * * \return void. 
*/
static void DWC_ETH_QOS_re_alloc_skb(struct DWC_ETH_QOS_prv_data *pdata,
				     uint32_t qInx)
{
	int i;
	struct DWC_ETH_QOS_rx_wrapper_descriptor *desc_data =
		GET_RX_WRAPPER_DESC(qInx);
	struct DWC_ETH_QOS_rx_buffer *buffer = NULL;
	hw_interface_t *hw_if = &pdata->hw_if;
	int tail_idx;

	DBGPR("-->DWC_ETH_QOS_re_alloc_skb: desc_data->skb_realloc_idx = %d "\
		" qInx = %u\n", desc_data->skb_realloc_idx, qInx);

	/* Refill the dirty_rx descriptors already consumed by the driver,
	 * starting at skb_realloc_idx (the oldest consumed slot). */
	for (i = 0; i < desc_data->dirty_rx; i++) {
		buffer = GET_RX_BUF_PTR(qInx, desc_data->skb_realloc_idx);
		/* allocate skb & assign to each desc */
		if (pdata->alloc_rx_buf(pdata, buffer, GFP_ATOMIC)) {
			printk(KERN_ALERT "Failed to re allocate skb\n");
			pdata->xstats.q_re_alloc_rx_buf_failed[qInx]++;
			break;
		}

		/* Buffer writes must be visible before the descriptor is
		 * reset and handed back to the hardware. */
		wmb();
		hw_if->rx_desc_reset(desc_data->skb_realloc_idx, pdata,
				     buffer->inte, qInx);
		INCR_RX_DESC_INDEX(desc_data->skb_realloc_idx, 1);
	}
	/* Tail pointer is the last descriptor given back to hardware,
	 * i.e. one before the next slot to refill. */
	tail_idx = desc_data->skb_realloc_idx;
	DECR_RX_DESC_INDEX(tail_idx);
	hw_if->update_rx_tail_ptr(qInx,
		GET_RX_DESC_DMA_ADDR(qInx, tail_idx));
	desc_data->dirty_rx = 0;

	DBGPR("<--DWC_ETH_QOS_re_alloc_skb\n");

	return;
}

/*!
 * \brief API to initialize the function pointers.
 *
 * \details This function is called in probe to initialize all the function
 * pointers which are used in other functions to manage descriptors.
 *
 * \param[in] desc_if - pointer to desc_if_struct structure.
 *
 * \return void.
*/
void DWC_ETH_QOS_init_function_ptrs_desc(struct desc_if_struct *desc_if)
{
	DBGPR("-->DWC_ETH_QOS_init_function_ptrs_desc\n");

	/* queue structure management */
	desc_if->alloc_queue_struct = DWC_ETH_QOS_alloc_queue_struct;
	desc_if->free_queue_struct = DWC_ETH_QOS_free_queue_struct;

	/* descriptor/buffer memory management */
	desc_if->alloc_buff_and_desc = allocate_buffer_and_desc;
	desc_if->tx_free_mem = DWC_ETH_QOS_tx_free_mem;
	desc_if->rx_free_mem = DWC_ETH_QOS_rx_free_mem;

	/* descriptor ring initialization */
	desc_if->wrapper_tx_desc_init = DWC_ETH_QOS_wrapper_tx_descriptor_init;
	desc_if->wrapper_tx_desc_init_single_q =
	    DWC_ETH_QOS_wrapper_tx_descriptor_init_single_q;
	desc_if->wrapper_rx_desc_init = DWC_ETH_QOS_wrapper_rx_descriptor_init;
	desc_if->wrapper_rx_desc_init_single_q =
	    DWC_ETH_QOS_wrapper_rx_descriptor_init_single_q;

	/* skb mapping / unmapping / refill */
	desc_if->map_tx_skb = DWC_ETH_QOS_map_skb;
	desc_if->unmap_tx_skb = DWC_ETH_QOS_unmap_tx_skb;
	desc_if->unmap_rx_skb = DWC_ETH_QOS_unmap_rx_skb;
	desc_if->realloc_skb = DWC_ETH_QOS_re_alloc_skb;

	/* skb freeing */
	desc_if->tx_skb_free_mem = DWC_ETH_QOS_tx_skb_free_mem;
	desc_if->tx_skb_free_mem_single_q =
	    DWC_ETH_QOS_tx_skb_free_mem_single_q;
	desc_if->rx_skb_free_mem = DWC_ETH_QOS_rx_skb_free_mem;
	desc_if->rx_skb_free_mem_single_q =
	    DWC_ETH_QOS_rx_skb_free_mem_single_q;

	/* TSO handling */
	desc_if->handle_tso = DWC_ETH_QOS_handle_tso;

	DBGPR("<--DWC_ETH_QOS_init_function_ptrs_desc\n");
}