/* ========================================================================= * The Synopsys DWC ETHER QOS Software Driver and documentation (hereinafter * "Software") is an unsupported proprietary work of Synopsys, Inc. unless * otherwise expressly agreed to in writing between Synopsys and you. * * The Software IS NOT an item of Licensed Software or Licensed Product under * any End User Software License Agreement or Agreement for Licensed Product * with Synopsys or any supplement thereto. Permission is hereby granted, * free of charge, to any person obtaining a copy of this software annotated * with this license and the Software, to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject * to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * ========================================================================= */ /*!@file: DWC_ETH_QOS_pci.c * @brief: Driver functions. */ #include #include #include "DWC_ETH_QOS_yheader.h" #include "DWC_ETH_QOS_yregacc.h" int DWC_ETH_QOS_runtime_suspend(struct device *dev); int DWC_ETH_QOS_runtime_resume(struct device *dev); static const struct dev_pm_ops DWC_ETH_QOS_rpm_ops = { SET_RUNTIME_PM_OPS(DWC_ETH_QOS_runtime_suspend, DWC_ETH_QOS_runtime_resume, NULL) }; #define GMAC5_AUTOSUSPEND_DELAY_IN_MILLISEC 100 /** Assuming an average packet size of 1522 bytes the total number of packets in one second at 2Gbps would be: Num of packets = 2000000000 / (1522 * 8) = 164258 To process an interrupt when there are 32 packets in queue: Num interrupts per sec = 164258 / 32 = 5133 */ #define DEFAULT_NUM_IPS (5133) static uint8_t dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7}; uint32_t dwc_eth_qos_pci_base_addr; bool config_prints = false; static bool msi_mode = true; static bool tso_enable = true; static bool vlan_filter_enable = false; /* Enabling only one queue by default because MAC sends most of the traffic to MTL FIFO 0. */ static short num_of_queues = 1; /* default value for gmac5 speed when connected to 4 is 2.5G */ unsigned int gmac5to4_speed = 2500; module_param(config_prints, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(config_prints, "Enable configuration prints"); module_param(tso_enable, bool, S_IRUGO); MODULE_PARM_DESC(tso_enable, "Enable/disable TSO"); module_param(msi_mode, bool, S_IRUGO); MODULE_PARM_DESC(msi_mode, "Enable/disable MSI interrupts mode"); module_param(num_of_queues, short, S_IRUGO); MODULE_PARM_DESC(num_of_queues, "Number of device queues"); module_param(vlan_filter_enable, bool, S_IRUGO); MODULE_PARM_DESC(vlan_filter_enable, "Enable/disable VLAN hash filter"); module_param(gmac5to4_speed, uint, S_IRUGO); MODULE_PARM_DESC(gmac5to4_speed, "phy speed when connected to gmac4 [10 | 100 | 1000 | 2500 | 5000]"); #ifdef GBE_DEBUG bool print_tx_pkts = false; bool print_rx_pkts = false; bool print_desc = false; uint metadata_on_crc = 0; uint mss_for_tso = 0; module_param(print_tx_pkts, bool, S_IRUGO); MODULE_PARM_DESC(print_tx_pkts, "Dump Tx packets"); module_param(print_rx_pkts, bool, S_IRUGO); MODULE_PARM_DESC(print_rx_pkts, "Dump Rx packets"); module_param(print_desc, bool, S_IRUGO); MODULE_PARM_DESC(print_desc, "Print Tx descriptors"); module_param(metadata_on_crc, uint, S_IRUGO); MODULE_PARM_DESC(metadata_on_crc, "Test metadata on CRC"); module_param(mss_for_tso, uint, S_IRUGO); MODULE_PARM_DESC(mss_for_tso, "MSS value to test TSO"); static ssize_t gbe_dbg_show(struct device *dev, struct device_attribute *attr, char *buf) { struct DWC_ETH_QOS_prv_data *pdata = NULL; struct DWC_ETH_QOS_tx_wrapper_descriptor *tx_desc_data = NULL; struct DWC_ETH_QOS_rx_wrapper_descriptor *rx_desc_data = NULL; tx_descriptor_t *tx_desc = NULL; rx_descriptor_t *rx_desc = NULL; uint32_t qInx = 0, i, j; char *st = buf; pdata = container_of(attr, struct DWC_ETH_QOS_prv_data, debug_attr); for (qInx = 0; qInx < DWC_ETH_QOS_RX_QUEUE_CNT; qInx++) { buf += sprintf(buf, "---------------------------------------\n"); tx_desc_data = GET_TX_WRAPPER_DESC(qInx); buf += sprintf(buf, "[%u]cur_tx = %d\n", qInx, tx_desc_data->cur_tx); buf += sprintf(buf, "[%u]dirty_tx = %d\n", qInx, tx_desc_data->dirty_tx); buf += sprintf(buf, "[%u]free_desc_cnt = %d\n", qInx, tx_desc_data->free_desc_cnt); buf += sprintf(buf, "[%u]queue_stopped = %d\n", qInx, tx_desc_data->queue_stopped); buf += sprintf(buf, "[%u]tx_pkt_queued = %d\n", qInx, tx_desc_data->tx_pkt_queued); i = tx_desc_data->dirty_tx; DECR_TX_DESC_INDEX(i); j = tx_desc_data->cur_tx; INCR_TX_DESC_INDEX(j, 1); while(i != j) { tx_desc = GET_TX_DESC_PTR(qInx, i); buf += sprintf(buf, "[%u:%u] 0x%08x:0x%08x:0x%08x:0x%08x\n", qInx, i, tx_desc->TDES0, tx_desc->TDES1, tx_desc->TDES2, tx_desc->TDES3); INCR_TX_DESC_INDEX(i, 1); } buf += sprintf(buf, "---------------------------------------\n"); rx_desc_data = GET_RX_WRAPPER_DESC(qInx); buf += sprintf(buf, "[%u]cur_rx = %d\n", qInx, rx_desc_data->cur_rx); buf += sprintf(buf, "[%u]dirty_rx = %d\n", qInx, rx_desc_data->dirty_rx); buf += sprintf(buf, "[%u]pkt_received = %d\n", qInx, rx_desc_data->pkt_received); i = rx_desc_data->cur_rx; rx_desc = GET_RX_DESC_PTR(qInx, i); while(!(rx_desc->RDES3 & DWC_ETH_QOS_RDESC3_OWN)) { buf += sprintf(buf, "[%u:%u] 0x%08x:0x%08x:0x%08x:0x%08x\n", qInx, i, rx_desc->RDES0, rx_desc->RDES1, rx_desc->RDES2, rx_desc->RDES3); INCR_RX_DESC_INDEX(i, 1); rx_desc = GET_RX_DESC_PTR(qInx, i); } buf += sprintf(buf, "---------------------------------------\n"); } return (buf - st); } static int gbe_handle_suspend_resume(void *args, netss_power_state_t state); static ssize_t gbe_suspend_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int value = simple_strtoul(buf, NULL, 0); struct DWC_ETH_QOS_prv_data *pdata = NULL; pdata = container_of(attr, struct DWC_ETH_QOS_prv_data, suspend_attr); gbe_handle_suspend_resume(pdata, (value)? NETSS_NETIP_POWER_STATE_OFF : NETSS_NETIP_POWER_STATE_ACTIVE); return count; } #endif //GBE_DEBUG unsigned int gbe_get_4to5_speed(void) { switch (gmac5to4_speed) { case 10: return GBE_GCR5_PHY_SPEED_10M; case 100: return GBE_GCR5_PHY_SPEED_100M; case 1000: return GBE_GCR5_PHY_SPEED_1G; case 2500: return GBE_GCR5_PHY_SPEED_2_5G; case 5000: return GBE_GCR5_PHY_SPEED_5G; default: /* not set, use 5G default value*/ CFG_PRINT("[GBE] 4to5 speed incorrect, using 5G default\n"); return GBE_GCR5_PHY_SPEED_5G; } } static int gbe_init_top_registers(void __iomem **gbe_base, unsigned int *mux_cfg) { int ret = 1; netss_dev_info_t gbe_mmio, bootcfg_mmio; void __iomem *reg_base = NULL; CFG_PRINT("[GBE] Initializing General registers\n"); if (!netss_driver_ready()) { printk(KERN_ALERT "[GBE] NetSS not ready!\n"); } else if (netss_device_get_info(NETSS_DEV_GBE, &gbe_mmio) != 0 || netss_device_get_info(NETSS_DEV_BOOTCFG, &bootcfg_mmio) != 0) { printk(KERN_ALERT "[GBE] Error getting GbE MMIO info!\n"); } else { uint32_t reg_val, reg_cfg; CFG_PRINT("[GBE] mem_iobase = 0x%08x, mem_iosize = 0x%08x\n", (unsigned int)gbe_mmio.base, (unsigned int)gbe_mmio.size); CFG_PRINT("[BOOTCFG] mem_iobase = 0x%08x, mem_iosize = 0x%08x\n", (unsigned int)bootcfg_mmio.base, (unsigned int)bootcfg_mmio.size); reg_base = (void __iomem *)ioremap_nocache(bootcfg_mmio.base, bootcfg_mmio.size); reg_val = GBE_REG_RD(BCFG2SSX_GBEM_CFG); iounmap(reg_base); CFG_PRINT("[GBE] bcfg2ssx_gbem_cfg = 0x%08x\n", reg_val); reg_cfg = VAR32_GET_BIT(reg_val, BCFG2SSX_GBEM_SNOOPED); if (!reg_cfg) { printk(KERN_ALERT "[GBE] Snooped bit not set in BootConfig!\n"); } else { reg_base = (void __iomem *)ioremap_nocache(gbe_mmio.base, gbe_mmio.size); // Disable GMAC5 core GBE_REG_WR_BIT(GBE_GCR5, GBE_GCR5_ENABLE, 0x0); // Enable GMAC5 endianess converter GBE_REG_WR_BIT(GBE_GVBECR, GBE_GVBECR_GMAC5, 0x1); // Start soft reset GBE_REG_WR_BIT(GBE_GCR5, GBE_GCR5_RESET, 0x0); msleep(1); // Stop soft reset GBE_REG_WR_BIT(GBE_GCR5, GBE_GCR5_RESET, 0x1); // Get current GMAC5 configuration reg_val = GBE_REG_RD(GBE_GCR5); // Read GBE MUX configuration *mux_cfg = GBE_REG_RD_FIELD(GBE_GMCR, GBE_GMCR_GMAC25); if (*mux_cfg == GMCR_GMAC5_TO_PHY) { CFG_PRINT("[GBE] GMAC5 to PHY!\n"); // Set RGMII PHY config VAR32_SET_FIELD(reg_val, GBE_GCR5_PHY_CFG, 0x1); // Set PHY speed to 1G VAR32_SET_FIELD(reg_val, GBE_GCR5_PHY_SPEED, GBE_GCR5_PHY_SPEED_1G); } else if (*mux_cfg == GMCR_GMAC5_TO_GMAC4) { CFG_PRINT("[GBE] GMAC5 to GMAC4!\n"); // Set GMII PHY config VAR32_SET_FIELD(reg_val, GBE_GCR5_PHY_CFG, 0x0); // Set PHY speed according to module param VAR32_SET_FIELD(reg_val, GBE_GCR5_PHY_SPEED, gbe_get_4to5_speed()); } else { CFG_PRINT("[GBE] GMAC5 is not connected!\n"); } // Set endianess (LE) VAR32_SET_BIT(reg_val, GBE_GCR5_ENDIANESS, 0x0); // Write configuration GBE_REG_WR(GBE_GCR5, reg_val); // Enable GMAC5 core GBE_REG_WR_BIT(GBE_GCR5, GBE_GCR5_ENABLE, 0x1); CFG_PRINT("[GBE] Dump registers:\n"); CFG_PRINT("[GBE] GBE_GCR5 = 0x%08x\n", GBE_REG_RD(GBE_GCR5)); CFG_PRINT("[GBE] GBE_GMCR = 0x%08x\n", GBE_REG_RD(GBE_GMCR)); CFG_PRINT("[GBE] GBE_GVBECR = 0x%08x\n", GBE_REG_RD(GBE_GVBECR)); CFG_PRINT("[GBE] GBE_GSRH = 0x%08x\n", GBE_REG_RD(GBE_GSRH)); CFG_PRINT("[GBE] GBE_GSRL = 0x%08x\n", GBE_REG_RD(GBE_GSRL)); CFG_PRINT("[GBE] GBE_ATOM_HIE = 0x%08x\n", GBE_REG_RD(GBE_ATOM_HIE)); CFG_PRINT("[GBE] GBE_ATOM_SWI = 0x%08x\n", GBE_REG_RD(GBE_ATOM_SWI)); CFG_PRINT("[GBE] GBE_ATOM_ELS = 0x%08x\n", GBE_REG_RD(GBE_ATOM_ELS)); CFG_PRINT("[GBE] GBE_ATOM_IMV = 0x%08x\n", GBE_REG_RD(GBE_ATOM_IMV)); CFG_PRINT("[GBE] GBE_ATOM_IRS = 0x%08x\n", GBE_REG_RD(GBE_ATOM_IRS)); CFG_PRINT("[GBE] GBE_ATOM_IMS = 0x%08x\n", GBE_REG_RD(GBE_ATOM_IMS)); *gbe_base = reg_base; ret = 0; } } return ret; } static void gbe_configure_IC(void __iomem *reg_base) { // Enable GMAC5 hardware interrupts GBE_REG_WR_BIT(GBE_ATOM_HIE, GBE_ATOM_INTC, 0x1); // Enable GMAC5 level interrupts GBE_REG_WR_BIT(GBE_ATOM_ELS, GBE_ATOM_INTC, 0x1); } static ssize_t gbe_speed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct DWC_ETH_QOS_prv_data *pdata = NULL; pdata = container_of(attr, struct DWC_ETH_QOS_prv_data, rate_attr); return sprintf(buf, "%d\n", gbe_config_to_speed(pdata->rate)); } static ssize_t gbe_speed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int value = simple_strtoul(buf, NULL, 0); struct DWC_ETH_QOS_prv_data *pdata = NULL; pdata = container_of(attr, struct DWC_ETH_QOS_prv_data, rate_attr); pdata->hw_if.set_speed(pdata, value); return count; } static ssize_t gbe_stats_show(struct device *dev, struct device_attribute *attr, char *buf) { struct DWC_ETH_QOS_prv_data *pdata = NULL; uint32_t qInx = 0; char *st = buf; pdata = container_of(attr, struct DWC_ETH_QOS_prv_data, stats_attr); for (qInx = 0; qInx < DWC_ETH_QOS_RX_QUEUE_CNT; qInx++) { buf += sprintf(buf, "[%u]rx_irq = %d\n", qInx, pdata->xstats.rx_normal_irq_n[qInx]); buf += sprintf(buf, "[%u]rx_bu_irq = %d\n", qInx, pdata->xstats.rx_buf_unavailable_irq_n[qInx]); buf += sprintf(buf, "[%u]rx_ps_irq = %d\n", qInx, pdata->xstats.rx_process_stopped_irq_n[qInx]); buf += sprintf(buf, "[%u]tx_irq = %d\n", qInx, pdata->xstats.tx_normal_irq_n[qInx]); buf += sprintf(buf, "[%u]tx_bu_irq = %d\n", qInx, pdata->xstats.tx_buf_unavailable_irq_n[qInx]); buf += sprintf(buf, "[%u]tx_ps_irq = %d\n", qInx, pdata->xstats.tx_process_stopped_irq_n[qInx]); } return (buf - st); } static ssize_t gbe_itr_ips_show(struct device *dev, struct device_attribute *attr, char *buf) { struct DWC_ETH_QOS_prv_data *pdata = NULL; pdata = container_of(attr, struct DWC_ETH_QOS_prv_data, itr_lat_attr); return sprintf(buf, "%d\n", ONE_SEC_TO_NS/pdata->itr_latency); } static ssize_t gbe_itr_ips_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { uint32_t max_ips = simple_strtoul(buf, NULL, 0); struct DWC_ETH_QOS_prv_data *pdata = NULL; pdata = container_of(attr, struct DWC_ETH_QOS_prv_data, itr_lat_attr); pdata->itr_latency = ONE_SEC_TO_NS/max_ips; return count; } static void create_gbe_sysfs(struct DWC_ETH_QOS_prv_data *pdata) { struct device_attribute *dev_attr = &pdata->rate_attr; sysfs_attr_init(&dev_attr->attr); dev_attr->show = gbe_speed_show; dev_attr->store = gbe_speed_store; dev_attr->attr.mode = S_IRUGO | S_IWUSR; dev_attr->attr.name = "rate"; if (device_create_file(&pdata->dev->dev, dev_attr)) { printk(KERN_ALERT "[GBE] Error creating rate sysfs attribute!\n"); } dev_attr = &pdata->stats_attr; sysfs_attr_init(&dev_attr->attr); dev_attr->show = gbe_stats_show; dev_attr->store = NULL; dev_attr->attr.mode = S_IRUGO; dev_attr->attr.name = "stats"; if (device_create_file(&pdata->dev->dev, dev_attr)) { printk(KERN_ALERT "[GBE] Error creating stats sysfs attribute!\n"); } #ifdef GBE_DEBUG dev_attr = &pdata->debug_attr; sysfs_attr_init(&dev_attr->attr); dev_attr->show = gbe_dbg_show; dev_attr->store = NULL; dev_attr->attr.mode = S_IRUGO; dev_attr->attr.name = "debug"; if (device_create_file(&pdata->dev->dev, dev_attr)) { printk(KERN_ALERT "[GBE] Error creating debug sysfs attribute!\n"); } dev_attr = &pdata->suspend_attr; sysfs_attr_init(&dev_attr->attr); dev_attr->show = NULL; dev_attr->store = gbe_suspend_store; dev_attr->attr.mode = S_IWUSR; dev_attr->attr.name = "suspend"; if (device_create_file(&pdata->dev->dev, dev_attr)) { printk(KERN_ALERT "[GBE] Error creating suspend sysfs attribute!\n"); } #endif dev_attr = &pdata->itr_lat_attr; sysfs_attr_init(&dev_attr->attr); dev_attr->show = gbe_itr_ips_show; dev_attr->store = gbe_itr_ips_store; dev_attr->attr.mode = S_IRUGO | S_IWUSR; dev_attr->attr.name = "itr_max_ips"; if (device_create_file(&pdata->dev->dev, dev_attr)) { printk(KERN_ALERT "[GBE] Error creating itr_max_ips sysfs attribute!\n"); } } void gbe_core_version(struct DWC_ETH_QOS_prv_data *pdata) { // Enable Interrupt Controller if version is 4.00 pdata->version = DWC_REG_RD_FIELD(MAC_VR, MAC_VR_SNPSVER); CFG_PRINT("[GBE] Core version = 0x%02x\n", pdata->version); if (pdata->version == MAC_VER_4_00) gbe_configure_IC(pdata->gbe_base); } static int gbe_handle_suspend_resume(void *args, netss_power_state_t state) { struct DWC_ETH_QOS_prv_data *pdata = (struct DWC_ETH_QOS_prv_data *)args; int ret = -EINVAL; if (state == NETSS_NETIP_POWER_STATE_OFF) { ret = DWC_ETH_QOS_powerdown(pdata->dev, DWC_ETH_QOS_NETIP_WAKEUP); } else if (state == NETSS_NETIP_POWER_STATE_ACTIVE) { ret = DWC_ETH_QOS_powerup(pdata->dev); } return ret; } void DWC_ETH_QOS_init_all_fptrs(struct DWC_ETH_QOS_prv_data *pdata) { DWC_ETH_QOS_init_function_ptrs_dev(&pdata->hw_if); DWC_ETH_QOS_init_function_ptrs_desc(&pdata->desc_if); } struct DWC_ETH_QOS_prv_data *pdata_p; /*! * \brief API to initialize the device. * * \details This probing function gets called (during execution of * pci_register_driver() for already existing devices or later if a * new device gets inserted) for all PCI devices which match the ID table * and are not "owned" by the other drivers yet. This function gets passed * a "struct pci_dev *" for each device whose entry in the ID table matches * the device. The probe function returns zero when the driver chooses to take * "ownership" of the device or an error code (negative number) otherwise. * The probe function always gets called from process context, so it can sleep. * * \param[in] pdev - pointer to pci_dev structure. * \param[in] id - pointer to table of device ID/ID's the driver is inerested. * * \return integer * * \retval 0 on success & -ve number on failure. */ int DWC_ETH_QOS_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct DWC_ETH_QOS_prv_data *pdata = NULL; struct net_device *dev = NULL; int i, ret = 0; hw_interface_t *hw_if = NULL; struct desc_if_struct *desc_if = NULL; uint8_t tx_q_count = 0, rx_q_count = 0; void __iomem *gbe_base; unsigned int gbe_mux_cfg; netss_power_state_callback_info_t pm_callback_info; #ifdef GBE_DEBUG char dbg_str[]="_DEBUG"; #else char dbg_str[]=""; #endif CFG_PRINT("--> DWC_ETH_QOS_probe\n"); if ((ret = pci_enable_device(pdev)) != 0) { printk(KERN_ALERT "%s:Unable to enable device\n", DEV_NAME); goto err_out_enb_failed; } if (msi_mode) { ret = pci_enable_msi(pdev); if (ret != 0) { printk(KERN_ALERT "%s:Unable to enable MSI\n", DEV_NAME); goto err_out_msi_failed; } } if (gbe_init_top_registers(&gbe_base, &gbe_mux_cfg)) { ret = -ENODEV; goto err_out_req_reg_failed; } if (pci_request_regions(pdev, DEV_NAME)) { printk(KERN_ALERT "%s:Failed to get PCI regions\n", DEV_NAME); ret = -ENODEV; goto err_out_req_reg_failed; } pci_set_master(pdev); for (i = 0; i <= 5; i++) { if (pci_resource_len(pdev, i) == 0) continue; dwc_eth_qos_pci_base_addr = (uint32_t) pci_iomap(pdev, i, COMPLETE_BAR); if ((void __iomem *)dwc_eth_qos_pci_base_addr == NULL) { printk(KERN_ALERT "%s: cannot map register memory, aborting", pci_name(pdev)); ret = -EIO; goto err_out_map_failed; } break; } DBGPR("dwc_eth_qos_pci_base_addr = %#lx\n", dwc_eth_qos_pci_base_addr); if (num_of_queues) { tx_q_count = num_of_queues; rx_q_count = num_of_queues; } else { tx_q_count = get_tx_queue_count(); rx_q_count = get_rx_queue_count(); } dev = alloc_netdev_mqs(sizeof(struct DWC_ETH_QOS_prv_data), "gmac5", NET_NAME_PREDICTABLE, ether_setup, tx_q_count, rx_q_count); if (dev == NULL) { printk(KERN_ALERT "%s:Unable to alloc new net device\n", DEV_NAME); ret = -ENOMEM; goto err_out_dev_failed; } dev->dev_addr[0] = dev_addr[0]; dev->dev_addr[1] = dev_addr[1]; dev->dev_addr[2] = dev_addr[2]; dev->dev_addr[3] = dev_addr[3]; dev->dev_addr[4] = dev_addr[4]; dev->dev_addr[5] = dev_addr[5]; dev->base_addr = dwc_eth_qos_pci_base_addr; SET_NETDEV_DEV(dev, &pdev->dev); pdata = netdev_priv(dev); pdata_p = pdata; DWC_ETH_QOS_init_all_fptrs(pdata); hw_if = &(pdata->hw_if); desc_if = &(pdata->desc_if); /* Initialize HW configuration variables */ memset(&pdata->hw_cfg, 0, sizeof(hw_config_t)); pci_set_drvdata(pdev, dev); pdata->pdev = pdev; pdata->dev = dev; pdata->tx_queue_cnt = tx_q_count; pdata->rx_queue_cnt = rx_q_count; pdata->gbe_base = gbe_base; pdata->mux_cfg = gbe_mux_cfg; pdata->rate = (gbe_mux_cfg == GMCR_GMAC5_TO_PHY)? GBE_GCR5_PHY_SPEED_1G : gbe_get_4to5_speed(); pdata->itr_latency = ONE_SEC_TO_NS/DEFAULT_NUM_IPS; /* Verify GMAC core version */ gbe_core_version(pdata); /* issue software reset to device */ hw_if->sw_reset(); dev->irq = pdev->irq; DWC_ETH_QOS_get_all_hw_features(pdata); DWC_ETH_QOS_print_all_hw_features(pdata); // Disable MDIO pdata->hw_feat.sma_sel = 0; /* Override TSO with module parameter (if HW supports TSO) */ if (pdata->hw_feat.tso_en) pdata->hw_feat.tso_en = tso_enable; /* Notify of potential known issues with TSO in core v4.00 */ if (pdata->hw_feat.tso_en && pdata->version == MAC_VER_4_00 && num_of_queues > 1) WRN_PRINT("TSO in v4.00 with more than one queue may fail!\n"); #ifdef GBE_DEBUG // Force enable TSO if mss parameter was passed if (mss_for_tso) pdata->hw_feat.tso_en = 1; if (metadata_on_crc) { CFG_PRINT("Disabling COE to test metadata on CRC\n"); // Disable Rx and Tx Checksum offload to test CRC on metadata pdata->hw_feat.tx_coe_sel = pdata->hw_feat.rx_coe_sel = 0; } #endif ret = desc_if->alloc_queue_struct(pdata); if (ret < 0) { printk(KERN_ALERT "ERROR: Unable to alloc Tx/Rx queue\n"); goto err_out_q_alloc_failed; } dev->netdev_ops = DWC_ETH_QOS_get_netdev_ops(); pdata->interface = DWC_ETH_QOS_get_phy_interface(pdata); /* Bypass PHYLIB for TBI, RTBI and SGMII interface */ if (1 == pdata->hw_feat.sma_sel) { ret = DWC_ETH_QOS_mdio_register(dev); if (ret < 0) { printk(KERN_ALERT "MDIO bus (id %d) registration failed\n", pdata->bus_id); goto err_out_mdio_reg; } } else { printk(KERN_ALERT "%s: MDIO is not present\n\n", DEV_NAME); } /* enabling and registration of irq with magic wakeup */ if (1 == pdata->hw_feat.mgk_sel) { device_set_wakeup_capable(&pdev->dev, 1); pdata->wolopts = WAKE_MAGIC; enable_irq_wake(dev->irq); } netif_napi_add(dev, &pdata->rx_napi, DWC_ETH_QOS_poll_rx, 64 * DWC_ETH_QOS_RX_QUEUE_CNT); dev->ethtool_ops = DWC_ETH_QOS_get_ethtool_ops(); if (pdata->hw_feat.tso_en) { dev->hw_features = NETIF_F_TSO; dev->hw_features |= NETIF_F_SG; dev->hw_features |= NETIF_F_IP_CSUM; dev->hw_features |= NETIF_F_IPV6_CSUM; CFG_PRINT("Supports TSO, SG and TX COE\n"); } else if (pdata->hw_feat.tx_coe_sel) { dev->hw_features = NETIF_F_IP_CSUM ; dev->hw_features |= NETIF_F_IPV6_CSUM; CFG_PRINT("Supports TX COE\n"); } if (pdata->hw_feat.rx_coe_sel) { dev->hw_features |= NETIF_F_RXCSUM; CFG_PRINT("Supports RX COE\n"); } #ifdef DWC_ETH_QOS_ENABLE_VLAN_TAG dev->vlan_features |= dev->hw_features; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; if (pdata->hw_feat.sa_vlan_ins) { dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; CFG_PRINT("VLAN Feature enabled\n"); } /* Override VLAN filter enabling with module parameter */ if (pdata->hw_feat.vlan_hash_en) pdata->hw_feat.vlan_hash_en = vlan_filter_enable; if (pdata->hw_feat.vlan_hash_en) { dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; CFG_PRINT("VLAN HASH Filtering enabled\n"); } #endif /* end of DWC_ETH_QOS_ENABLE_VLAN_TAG */ dev->features |= dev->hw_features; pdata->dev_state |= dev->features; if(!(dev->features & NETIF_F_LRO)) dev->features |= NETIF_F_GRO; DWC_ETH_QOS_init_rx_coalesce(pdata); #ifdef DWC_ETH_QOS_CONFIG_PTP DWC_ETH_QOS_ptp_init(pdata); #endif /* end of DWC_ETH_QOS_CONFIG_PTP */ spin_lock_init(&pdata->lock); spin_lock_init(&pdata->tx_lock); spin_lock_init(&pdata->pmt_lock); ret = register_netdev(dev); if (ret) { printk(KERN_ALERT "%s: Net device registration failed\n", DEV_NAME); goto err_out_netdev_failed; } #ifdef DWC_ETH_QOS_CONFIG_DEBUGFS create_debug_files(); #endif create_gbe_sysfs(pdata); /* Register PM callback with NetSS driver */ pm_callback_info.func = gbe_handle_suspend_resume; pm_callback_info.args = pdata; if (netss_power_state_change_callback_register(NETSS_DEV_GBE, &pm_callback_info)) { ERR_PRINT("Failed to register PM callback with NetSS!\n"); // TODO: // - Determine if it's ok to continue or return error code // - What if NetIP is in DSBY already? } if (pdata->hw_feat.pcs_sel) { netif_carrier_off(dev); CFG_PRINT("Carrier off till LINK is up\n"); } printk(KERN_INFO "Initializing Synopsys DWC_ETH_QOS%s driver\n", dbg_str); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_allow(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, GMAC5_AUTOSUSPEND_DELAY_IN_MILLISEC); pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_sync_autosuspend(&pdev->dev); CFG_PRINT("<-- DWC_ETH_QOS_probe\n"); return 0; err_out_netdev_failed: #ifdef DWC_ETH_QOS_CONFIG_PTP DWC_ETH_QOS_ptp_remove(pdata); #endif /* end of DWC_ETH_QOS_CONFIG_PTP */ if (1 == pdata->hw_feat.sma_sel) DWC_ETH_QOS_mdio_unregister(dev); err_out_mdio_reg: desc_if->free_queue_struct(pdata); err_out_q_alloc_failed: free_netdev(dev); pci_set_drvdata(pdev, NULL); err_out_dev_failed: pci_iounmap(pdev, (void __iomem *)dwc_eth_qos_pci_base_addr); err_out_map_failed: pci_release_regions(pdev); err_out_req_reg_failed: if (msi_mode) pci_disable_msi(pdev); err_out_msi_failed: pci_disable_device(pdev); err_out_enb_failed: return ret; } /*! * \brief API to release all the resources from the driver. * * \details The remove function gets called whenever a device being handled * by this driver is removed (either during deregistration of the driver or * when it is manually pulled out of a hot-pluggable slot). This function * should reverse operations performed at probe time. The remove function * always gets called from process context, so it can sleep. * * \param[in] pdev - pointer to pci_dev structure. * * \return void */ void DWC_ETH_QOS_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev); struct desc_if_struct *desc_if = &(pdata->desc_if); void __iomem *reg_base = pdata->gbe_base; netss_power_state_callback_info_t pm_callback_info; DBGPR("--> DWC_ETH_QOS_remove\n"); /* Deregister PM callback with NetSS driver */ pm_callback_info.func = NULL; pm_callback_info.args = NULL; netss_power_state_change_callback_register(NETSS_DEV_GBE, &pm_callback_info); if (pdata->irq_number != 0) { free_irq(pdata->irq_number, pdata); pdata->irq_number = 0; } if (1 == pdata->hw_feat.sma_sel) DWC_ETH_QOS_mdio_unregister(dev); #ifdef DWC_ETH_QOS_CONFIG_PTP DWC_ETH_QOS_ptp_remove(pdata); #endif /* end of DWC_ETH_QOS_CONFIG_PTP */ unregister_netdev(dev); #ifdef DWC_ETH_QOS_CONFIG_DEBUGFS remove_debug_files(); #endif desc_if->free_queue_struct(pdata); free_netdev(dev); pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, (void __iomem *)dwc_eth_qos_pci_base_addr); pci_release_regions(pdev); pci_disable_device(pdev); if (msi_mode) pci_disable_msi(pdev); // Disable GMAC5 core GBE_REG_WR_BIT(GBE_GCR5, GBE_GCR5_ENABLE, 0x0); iounmap(reg_base); DBGPR("<-- DWC_ETH_QOS_remove\n"); return; } #ifdef CONFIG_PM static int DWC_ETH_QOS_suspend(struct pci_dev *, pm_message_t); static int DWC_ETH_QOS_resume(struct pci_dev *); #endif static struct pci_device_id DWC_ETH_QOS_id[] = { {PCI_DEVICE(VENDOR_ID, DEVICE_ID)}, {0}, /* terminate list */ }; struct pci_dev *DWC_ETH_QOS_pcidev; static struct pci_driver DWC_ETH_QOS_pci_driver = { .name = "DWC_ETH_QOS", .id_table = DWC_ETH_QOS_id, .probe = DWC_ETH_QOS_probe, .remove = DWC_ETH_QOS_remove, #ifdef CONFIG_PM .suspend = DWC_ETH_QOS_suspend, .resume = DWC_ETH_QOS_resume, #endif .driver = { .name = DEV_NAME, .owner = THIS_MODULE, .pm = &DWC_ETH_QOS_rpm_ops, }, }; int DWC_ETH_QOS_runtime_suspend(struct device *dev) { if (0 > pm_runtime_autosuspend_expiration(dev)) { CFG_PRINT("[%s] is NOT Suspending!!!\n", DEV_NAME); return -EAGAIN; } CFG_PRINT("[%s] is Suspended!!!\n", DEV_NAME); return 0; } int DWC_ETH_QOS_runtime_resume(struct device *dev) { pm_runtime_mark_last_busy(dev); CFG_PRINT("[%s] is Resumed!!!\n", DEV_NAME); return 0; } #ifdef CONFIG_PM /*! * \brief Routine to put the device in suspend mode * * \details This function gets called by PCI core when the device is being * suspended. The suspended state is passed as input argument to it. * Following operations are performed in this function, * - stop the phy. * - detach the device from stack. * - stop the queue. * - Disable napi. * - Stop DMA TX and RX process. * - Enable power down mode using PMT module or disable MAC TX and RX process. * - Save the pci state. * * \param[in] pdev – pointer to pci device structure. * \param[in] state – suspend state of device. * * \return int * * \retval 0 */ static int DWC_ETH_QOS_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev); hw_interface_t *hw_if = &(pdata->hw_if); int ret, pmt_flags = 0; unsigned int rwk_filter_values[] = { /* for filter 0 CRC is computed on 0 - 7 bytes from offset */ 0x000000ff, /* for filter 1 CRC is computed on 0 - 7 bytes from offset */ 0x000000ff, /* for filter 2 CRC is computed on 0 - 7 bytes from offset */ 0x000000ff, /* for filter 3 CRC is computed on 0 - 31 bytes from offset */ 0x000000ff, /* filter 0, 1 independently enabled and would apply for * unicast packet only filter 3, 2 combined as, * "Filter-3 pattern AND NOT Filter-2 pattern" */ 0x03050101, /* filter 3, 2, 1 and 0 offset is 50, 58, 66, 74 bytes * from start */ 0x4a423a32, /* pattern for filter 1 and 0, "0x55", "11", repeated 8 times */ 0xe7b77eed, /* pattern for filter 3 and 4, "0x44", "33", repeated 8 times */ 0x9b8a5506, }; DBGPR("-->DWC_ETH_QOS_suspend\n"); if (!dev || !netif_running(dev) || (!pdata->hw_feat.mgk_sel && !pdata->hw_feat.rwk_sel)) { DBGPR("<--DWC_ETH_QOS_dev_suspend\n"); return -EINVAL; } if (pdata->hw_feat.rwk_sel && (pdata->wolopts & WAKE_UCAST)) { pmt_flags |= DWC_ETH_QOS_REMOTE_WAKEUP; hw_if->configure_rwk_filter(rwk_filter_values, 8); } if (pdata->hw_feat.mgk_sel && (pdata->wolopts & WAKE_MAGIC)) pmt_flags |= DWC_ETH_QOS_MAGIC_WAKEUP; ret = DWC_ETH_QOS_powerdown(dev, pmt_flags); pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); DBGPR("<--DWC_ETH_QOS_suspend\n"); return ret; } /*! * \brief Routine to resume device operation * * \details This function gets called by PCI core when the device is being * resumed. It is always called after suspend has been called. These function * reverse operations performed at suspend time. Following operations are * performed in this function, * - restores the saved pci power state. * - Wakeup the device using PMT module if supported. * - Starts the phy. * - Enable MAC and DMA TX and RX process. * - Attach the device to stack. * - Enable napi. * - Starts the queue. * * \param[in] pdev – pointer to pci device structure. * * \return int * * \retval 0 */ static int DWC_ETH_QOS_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); int ret; DBGPR("-->DWC_ETH_QOS_resume\n"); if (!dev || !netif_running(dev)) { DBGPR("<--DWC_ETH_QOS_dev_resume\n"); return -EINVAL; } pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = DWC_ETH_QOS_powerup(dev); DBGPR("<--DWC_ETH_QOS_resume\n"); return ret; } #endif /* CONFIG_PM */ /*! * \brief API to register the driver. * * \details This is the first function called when the driver is loaded. * It register the driver with PCI sub-system * * \return void. */ static int DWC_ETH_QOS_init_module(void) { int ret = 0; DBGPR("-->DWC_ETH_QOS_init_module\n"); ret = pci_register_driver(&DWC_ETH_QOS_pci_driver); if (ret < 0) { printk(KERN_ALERT "DWC_ETH_QOS:driver registration failed"); } DBGPR("<--DWC_ETH_QOS_init_module\n"); return ret; } /*! * \brief API to unregister the driver. * * \details This is the first function called when the driver is removed. * It unregister the driver from PCI sub-system * * \return void. */ static void __exit DWC_ETH_QOS_exit_module(void) { DBGPR("-->DWC_ETH_QOS_exit_module\n"); pci_unregister_driver(&DWC_ETH_QOS_pci_driver); DBGPR("<--DWC_ETH_QOS_exit_module\n"); } /*! * \brief Macro to register the driver registration function. * * \details A module always begin with either the init_module or the function * you specify with module_init call. This is the entry function for modules; * it tells the kernel what functionality the module provides and sets up the * kernel to run the module's functions when they're needed. Once it does this, * entry function returns and the module does nothing until the kernel wants * to do something with the code that the module provides. */ module_init(DWC_ETH_QOS_init_module); /*! * \brief Macro to register the driver un-registration function. * * \details All modules end by calling either cleanup_module or the function * you specify with the module_exit call. This is the exit function for modules; * it undoes whatever entry function did. It unregisters the functionality * that the entry function registered. */ module_exit(DWC_ETH_QOS_exit_module); /*! * \brief Macro to declare the module author. * * \details This macro is used to declare the module's authore. */ MODULE_AUTHOR("Synopsys India Pvt Ltd"); /*! * \brief Macro to describe what the module does. * * \details This macro is used to describe what the module does. */ MODULE_DESCRIPTION("DWC_ETH_QOS Driver"); /*! * \brief Macro to describe the module license. * * \details This macro is used to describe the module license. */ MODULE_LICENSE("GPL");