--- zzzz-none-000/linux-3.10.107/drivers/net/ethernet/intel/igb/igb_main.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/net/ethernet/intel/igb/igb_main.c	2021-02-04 17:41:59.000000000 +0000
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
@@ -61,8 +57,8 @@
 #include "igb.h"
 
 #define MAJ 5
-#define MIN 0
-#define BUILD 3
+#define MIN 3
+#define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; @@ -70,13 +66,13 @@ static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; static const char igb_copyright[] = - "Copyright (c) 2007-2013 Intel Corporation."; + "Copyright (c) 2007-2014 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, }; -static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { +static const struct pci_device_id igb_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, @@ -85,6 +81,8 @@ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, @@ -116,7 +114,6 @@ MODULE_DEVICE_TABLE(pci, igb_pci_tbl); -void igb_reset(struct igb_adapter *); static int igb_setup_all_tx_resources(struct igb_adapter *); static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); @@ -140,7 +137,7 @@ static void igb_watchdog_task(struct work_struct *); static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats); + struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); static void igb_set_uta(struct igb_adapter *adapter); @@ -154,11 +151,12 @@ #endif /* CONFIG_IGB_DCA */ static int igb_poll(struct napi_struct *, int); static bool igb_clean_tx_irq(struct igb_q_vector *); -static bool igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); static void igb_reset_task(struct work_struct *); -static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features); +static void igb_vlan_mode(struct net_device *netdev, + netdev_features_t features); static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); static void igb_restore_vlan(struct igb_adapter *); @@ -171,7 +169,7 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); -static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, @@ -180,6 +178,9 @@ #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +static int igb_disable_sriov(struct pci_dev *dev); +static int igb_pci_disable_sriov(struct pci_dev *dev); #endif #ifdef CONFIG_PM @@ -187,11 +188,9 @@ static int 
igb_suspend(struct device *); #endif static int igb_resume(struct device *); -#ifdef CONFIG_PM_RUNTIME static int igb_runtime_suspend(struct device *dev); static int igb_runtime_resume(struct device *dev); static int igb_runtime_idle(struct device *dev); -#endif static const struct dev_pm_ops igb_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, @@ -213,10 +212,9 @@ static void igb_netpoll(struct net_device *); #endif #ifdef CONFIG_PCI_IOV -static unsigned int max_vfs = 0; +static unsigned int max_vfs; module_param(max_vfs, uint, 0); -MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " - "per physical function"); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); #endif /* CONFIG_PCI_IOV */ static pci_ers_result_t igb_io_error_detected(struct pci_dev *, @@ -382,8 +380,7 @@ /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - pr_info("Device Name state trans_start " - "last_rx\n"); + pr_info("Device Name state trans_start last_rx\n"); pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, netdev->state, netdev->trans_start, netdev->last_rx); } @@ -436,9 +433,7 @@ pr_info("------------------------------------\n"); pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); pr_info("------------------------------------\n"); - pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] " - "[bi->dma ] leng ntw timestamp " - "bi->skb\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { const char *next_desc; @@ -456,9 +451,8 @@ else next_desc = ""; - pr_info("T [0x%03X] %016llX %016llX %016llX" - " %04X %p %016llX %p%s\n", i, - le64_to_cpu(u0->a), + pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)dma_unmap_addr(buffer_info, dma), dma_unmap_len(buffer_info, len), @@ -517,10 +511,8 @@ pr_info("------------------------------------\n"); pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); pr_info("------------------------------------\n"); - pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] " - "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); - pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----" - "----------- [bi->skb] <-- Adv Rx Write-Back format\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { const char *next_desc; @@ -582,7 +574,7 @@ struct e1000_hw *hw = &adapter->hw; s32 i2cctl = rd32(E1000_I2CPARAMS); - return ((i2cctl & E1000_I2C_DATA_IN) != 0); + return !!(i2cctl & E1000_I2C_DATA_IN); } /** @@ -646,7 +638,7 @@ struct e1000_hw *hw = &adapter->hw; s32 i2cctl = rd32(E1000_I2CPARAMS); - return ((i2cctl & E1000_I2C_CLK_IN) != 0); + return !!(i2cctl & E1000_I2C_CLK_IN); } static const struct i2c_algo_bit_data igb_i2c_algo = { @@ -679,9 +671,9 @@ static int __init igb_init_module(void) { int ret; + pr_info("%s - version %s\n", igb_driver_string, igb_driver_version); - pr_info("%s\n", igb_copyright); #ifdef CONFIG_IGB_DCA @@ -734,12 +726,14 @@ adapter->rx_ring[i]->reg_idx = rbase_offset + Q_IDX_82576(i); } + /* Fall through */ case e1000_82575: case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: + /* Fall 
through */
 	default:
 		for (; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -749,6 +743,28 @@
 	}
 }
 
+u32 igb_rd32(struct e1000_hw *hw, u32 reg)
+{
+	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+	u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+	u32 value = 0;
+
+	if (E1000_REMOVED(hw_addr))
+		return ~value;
+
+	value = readl(&hw_addr[reg]);
+
+	/* reads should not return all F's */
+	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+		struct net_device *netdev = igb->netdev;
+		hw->hw_addr = NULL;
+		netif_device_detach(netdev);
+		netdev_err(netdev, "PCIe link lost, device now detached\n");
+	}
+
+	return value;
+}
+
 /**
  * igb_write_ivar - configure ivar for given MSI-X vector
  * @hw: pointer to the HW structure
@@ -800,7 +816,7 @@
 		msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
 	if (tx_queue > IGB_N0_QUEUE)
 		msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-	if (!adapter->msix_entries && msix_vector == 0)
+	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
 		msixbm |= E1000_EIMS_OTHER;
 	array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
 	q_vector->eims_value = msixbm;
@@ -980,43 +996,65 @@
 	return err;
 }
 
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
-{
-	if (adapter->msix_entries) {
-		pci_disable_msix(adapter->pdev);
-		kfree(adapter->msix_entries);
-		adapter->msix_entries = NULL;
-	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
-		pci_disable_msi(adapter->pdev);
-	}
-}
-
 /**
  * igb_free_q_vector - Free memory allocated for specific interrupt vector
  * @adapter: board private structure to initialize
  * @v_idx: Index of vector to be freed
  *
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vector.
 **/
 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
 {
 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
 
+	adapter->q_vector[v_idx] = NULL;
+
+	/* igb_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	if (q_vector)
+		kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igb_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+	/* Coming from igb_set_interrupt_capability, the vectors are not yet
+	 * allocated. So, q_vector is NULL so we should stop here.
+	 */
+	if (!q_vector)
+		return;
+
 	if (q_vector->tx.ring)
 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
 
 	if (q_vector->rx.ring)
-		adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
+		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
 
-	adapter->q_vector[v_idx] = NULL;
 	netif_napi_del(&q_vector->napi);
 
-	/* ixgbe_get_stats64() might access the rings on this vector,
-	 * we must wait a grace period before freeing it.
- */ - kfree_rcu(q_vector, rcu); +} + +static void igb_reset_interrupt_capability(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & IGB_FLAG_HAS_MSI) + pci_disable_msi(adapter->pdev); + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); } /** @@ -1035,8 +1079,10 @@ adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; - while (v_idx--) + while (v_idx--) { + igb_reset_q_vector(adapter, v_idx); igb_free_q_vector(adapter, v_idx); + } } /** @@ -1067,6 +1113,7 @@ if (!msix) goto msi_only; + adapter->flags |= IGB_FLAG_HAS_MSIX; /* Number of supported queues. */ adapter->num_rx_queues = adapter->rss_queues; @@ -1087,25 +1134,21 @@ /* add 1 vector for link status interrupts */ numvecs++; - adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), - GFP_KERNEL); - - if (!adapter->msix_entries) - goto msi_only; - for (i = 0; i < numvecs; i++) adapter->msix_entries[i].entry = i; - err = pci_enable_msix(adapter->pdev, - adapter->msix_entries, - numvecs); - if (err == 0) + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) return; igb_reset_interrupt_capability(adapter); /* If we can't do MSI-X, try MSI */ msi_only: + adapter->flags &= ~IGB_FLAG_HAS_MSIX; #ifdef CONFIG_PCI_IOV /* disable SR-IOV for non MSI-X configurations */ if (adapter->vf_data) { @@ -1169,7 +1212,15 @@ (sizeof(struct igb_ring) * ring_count); /* allocate q_vector and rings */ - q_vector = kzalloc(size, GFP_KERNEL); + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) { + q_vector = kzalloc(size, GFP_KERNEL); + } else if (size > ksize(q_vector)) { + kfree_rcu(q_vector, rcu); + q_vector = kzalloc(size, GFP_KERNEL); + } else { + memset(q_vector, 0, size); + } if (!q_vector) return -ENOMEM; @@ -1221,6 +1272,9 @@ ring->count = adapter->tx_ring_count; ring->queue_index = txr_idx; + u64_stats_init(&ring->tx_syncp); + u64_stats_init(&ring->tx_syncp2); + /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; @@ -1243,8 +1297,7 @@ if (adapter->hw.mac.type >= e1000_82576) set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); - /* - * On i350, i354, i210, and i211, loopback VLAN packets + /* On i350, i354, i210, and i211, loopback VLAN packets * have the tag byte-swapped. 
*/ if (adapter->hw.mac.type >= e1000_i350) @@ -1254,6 +1307,8 @@ ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; + u64_stats_init(&ring->rx_syncp); + /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; } @@ -1294,6 +1349,7 @@ for (; v_idx < q_vectors; v_idx++) { int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, tqpv, txr_idx, rqpv, rxr_idx); @@ -1362,7 +1418,7 @@ struct pci_dev *pdev = adapter->pdev; int err = 0; - if (adapter->msix_entries) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) { err = igb_request_msix(adapter); if (!err) goto request_done; @@ -1406,7 +1462,7 @@ static void igb_free_irq(struct igb_adapter *adapter) { - if (adapter->msix_entries) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) { int vector = 0, i; free_irq(adapter->msix_entries[vector++].vector, adapter); @@ -1431,8 +1487,9 @@ * mapped into these registers and so clearing the bits can cause * issues on the VF drivers so we only need to clear what we set */ - if (adapter->msix_entries) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) { u32 regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); wr32(E1000_EIMC, adapter->eims_enable_mask); regval = rd32(E1000_EIAC); @@ -1442,8 +1499,9 @@ wr32(E1000_IAM, 0); wr32(E1000_IMC, ~0); wrfl(); - if (adapter->msix_entries) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) { int i; + for (i = 0; i < adapter->num_q_vectors; i++) synchronize_irq(adapter->msix_entries[i].vector); } else { @@ -1459,9 +1517,10 @@ { struct e1000_hw *hw = &adapter->hw; - if (adapter->msix_entries) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) { u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; u32 regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval | adapter->eims_enable_mask); regval = rd32(E1000_EIAM); wr32(E1000_EIAM, regval | adapter->eims_enable_mask); @@ -1601,6 +1660,73 @@ } /** + * Detect and switch function for Media Auto Sense + * @adapter: address of the board private structure + **/ +static void igb_check_swap_media(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + connsw = rd32(E1000_CONNSW); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. 
+ */ + + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { + swap_now = true; + } else if (!(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ + if (adapter->copper_tries < 4) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; + if ((connsw & E1000_CONNSW_PHYSD) && + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + } + } + } + + if (!swap_now) + return; + + switch (hw->phy.media_type) { + case e1000_media_type_copper: + netdev_info(adapter->netdev, + "MAS: changing media to fiber/serdes\n"); + ctrl_ext |= + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + adapter->copper_tries = 0; + break; + case e1000_media_type_internal_serdes: + case e1000_media_type_fiber: + netdev_info(adapter->netdev, + "MAS: changing media to copper\n"); + ctrl_ext &= + ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + break; + default: + /* shouldn't get here during regular operation */ + netdev_err(adapter->netdev, + "AMS: Invalid media type found, returning\n"); + break; + } + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +/** * igb_up - Open the interface and prepare it to handle traffic * @adapter: board private structure **/ @@ -1617,7 +1743,7 @@ for (i = 0; i < adapter->num_q_vectors; i++) napi_enable(&(adapter->q_vector[i]->napi)); - if (adapter->msix_entries) + if (adapter->flags & IGB_FLAG_HAS_MSIX) igb_configure_msix(adapter); else igb_assign_vector(adapter->q_vector[0], 0); @@ -1629,6 +1755,7 @@ /* notify VFs that reset has been completed */ if (adapter->vfs_allocated_count) { u32 reg_data = rd32(E1000_CTRL_EXT); + reg_data |= E1000_CTRL_EXT_PFRSTD; wr32(E1000_CTRL_EXT, reg_data); } @@ -1639,6 +1766,10 @@ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + return 0; } @@ -1659,6 +1790,7 @@ wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ @@ -1667,18 +1799,22 @@ wr32(E1000_TCTL, tctl); /* flush both disables and wait for them to finish */ wrfl(); - msleep(10); - - for (i = 0; i < adapter->num_q_vectors; i++) - napi_disable(&(adapter->q_vector[i]->napi)); + usleep_range(10000, 11000); igb_irq_disable(adapter); + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netif_carrier_off(netdev); - /* record the stats before reset*/ spin_lock(&adapter->stats64_lock); igb_update_stats(adapter, &adapter->stats64); @@ -1702,12 +1838,31 @@ { WARN_ON(in_interrupt()); while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); igb_down(adapter); igb_up(adapter); clear_bit(__IGB_RESETTING, &adapter->state); } +/** igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +static void igb_enable_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 connsw = rd32(E1000_CONNSW); + 
+ /* configure for SerDes media detect */ + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_SERDESD))) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; + wr32(E1000_CONNSW, connsw); + wrfl(); + } +} + void igb_reset(struct igb_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; @@ -1804,6 +1959,7 @@ /* disable receive for all VFs and wait one second */ if (adapter->vfs_allocated_count) { int i; + for (i = 0 ; i < adapter->vfs_allocated_count; i++) adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; @@ -1819,6 +1975,15 @@ hw->mac.ops.reset_hw(hw); wr32(E1000_WUC, 0); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + /* need to resetup here after media swap */ + adapter->ei.get_invariants(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } + if ((mac->type == e1000_82575) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_enable_mas(adapter); + } if (hw->mac.ops.init_hw(hw)) dev_err(&pdev->dev, "Hardware Error\n"); @@ -1841,6 +2006,21 @@ } } #endif + /* Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + igb_set_eee_i350(hw, true, true); + break; + case e1000_i354: + igb_set_eee_i354(hw, true, true); + break; + default: + break; + } + } if (!netif_running(adapter->netdev)) igb_power_down_link(adapter); @@ -1906,7 +2086,7 @@ .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, .ndo_set_vf_mac = igb_ndo_set_vf_mac, .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, - .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw, + .ndo_set_vf_rate = igb_ndo_set_vf_bw, .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, .ndo_get_vf_config = igb_ndo_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1914,6 +2094,7 @@ #endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, + .ndo_features_check = passthru_features_check, }; /** @@ -1928,12 +2109,17 @@ igb_get_fw_version(hw, &fw); switch (hw->mac.type) { + case e1000_i210: case e1000_i211: - snprintf(adapter->fw_version, sizeof(adapter->fw_version), - "%2d.%2d-%d", - fw.invm_major, fw.invm_minor, fw.invm_img_type); - break; - + if (!(igb_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + /* fall through */ default: /* if option is rom valid, display its version too */ if (fw.or_valid) { @@ -1943,15 +2129,71 @@ fw.eep_major, fw.eep_minor, fw.etrack_id, fw.or_major, fw.or_build, fw.or_patch); /* no option rom */ - } else { + } else if (fw.etrack_id != 0X0000) { snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, fw.etrack_id); + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); } break; } - return; +} + +/** + * igb_init_mas - init Media Autosense feature if enabled in the NVM + * + * @adapter: adapter struct + **/ +static void igb_init_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + + hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: + if (eeprom_data & IGB_MAS_ENABLE_0) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case 
E1000_FUNC_1: + if (eeprom_data & IGB_MAS_ENABLE_1) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_2: + if (eeprom_data & IGB_MAS_ENABLE_2) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_3: + if (eeprom_data & IGB_MAS_ENABLE_3) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + default: + /* Shouldn't get here */ + netdev_err(adapter->netdev, + "MAS: Invalid port configuration, returning\n"); + break; + } } /** @@ -1960,11 +2202,11 @@ **/ static s32 igb_init_i2c(struct igb_adapter *adapter) { - s32 status = E1000_SUCCESS; + s32 status = 0; /* I2C interface supported on i350 devices */ if (adapter->hw.mac.type != e1000_i350) - return E1000_SUCCESS; + return 0; /* Initialize the i2c bus which is controlled by the registers. * This bus will use the i2c_algo_bit structue that implements @@ -2001,7 +2243,6 @@ s32 ret_val; static int global_quad_port_a; /* global quad port a indication */ const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; - unsigned long mmio_start, mmio_len; int err, pci_using_dac; u8 part_str[E1000_PBANUM_LENGTH]; @@ -2019,21 +2260,15 @@ return err; pci_using_dac = 0; - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; + pci_using_dac = 1; } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = dma_set_coherent_mask(&pdev->dev, - DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "No usable DMA configuration, aborting\n"); - goto err_dma; - } + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; } } @@ -2064,13 +2299,12 @@ hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - err = -EIO; - hw->hw_addr = ioremap(mmio_start, mmio_len); - if (!hw->hw_addr) + adapter->io_addr = pci_iomap(pdev, 0, 0); + if (!adapter->io_addr) goto err_ioremap; + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */ + hw->hw_addr = adapter->io_addr; netdev->netdev_ops = &igb_netdev_ops; igb_set_ethtool_ops(netdev); @@ -2078,8 +2312,8 @@ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); - netdev->mem_start = mmio_start; - netdev->mem_end = mmio_start + mmio_len; + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); /* PCI config space info */ hw->vendor_id = pdev->vendor; @@ -2165,15 +2399,28 @@ */ hw->mac.ops.reset_hw(hw); - /* make sure the NVM is good , i211 parts have special NVM that - * doesn't contain a checksum + /* make sure the NVM is good , i211/i210 parts can have special NVM + * that doesn't contain a checksum */ - if (hw->mac.type != e1000_i211) { + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + break; + default: if (hw->nvm.ops.validate(hw) < 0) { 
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; } + break; } /* copy the MAC address out of the NVM */ @@ -2191,6 +2438,12 @@ /* get firmware version for ethtool -i */ igb_set_fw_version(adapter); + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + setup_timer(&adapter->watchdog_timer, igb_watchdog, (unsigned long) adapter); setup_timer(&adapter->phy_info_timer, igb_update_phy_info, @@ -2283,7 +2536,8 @@ } /* let the f/w know that the h/w is now under the control of the - * driver. */ + * driver. + */ igb_get_hw_control(adapter); strcpy(netdev->name, "eth%d"); @@ -2322,6 +2576,11 @@ adapter->ets = false; } #endif + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + if (hw->dev_spec._82575.mas_capable) + igb_init_mas(adapter); + /* do hw tstamp init after resetting */ igb_ptp_init(adapter); @@ -2341,32 +2600,52 @@ "Width x1" : "unknown"), netdev->dev_addr); } - ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH); + if ((hw->mac.type >= e1000_i210 || + igb_get_flash_presence_i210(hw))) { + ret_val = igb_read_part_string(hw, part_str, + E1000_PBANUM_LENGTH); + } else { + ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; + } + if (ret_val) strcpy(part_str, "Unknown"); dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); dev_info(&pdev->dev, "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", - adapter->msix_entries ? "MSI-X" : + (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", adapter->num_rx_queues, adapter->num_tx_queues); - switch (hw->mac.type) { - case e1000_i350: - case e1000_i210: - case e1000_i211: - igb_set_eee_i350(hw); - break; - case e1000_i354: - if (hw->phy.media_type == e1000_media_type_copper) { + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = igb_set_eee_i350(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + break; + case e1000_i354: if ((rd32(E1000_CTRL_EXT) & - E1000_CTRL_EXT_LINK_MODE_SGMII)) - igb_set_eee_i354(hw); + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = igb_set_eee_i354(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + } + break; + default: + break; } - break; - default: - break; } - pm_runtime_put_noidle(&pdev->dev); return 0; @@ -2380,8 +2659,12 @@ if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: + kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); - iounmap(hw->hw_addr); +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif + pci_iounmap(pdev, adapter->io_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: @@ -2394,7 +2677,7 @@ } #ifdef CONFIG_PCI_IOV -static int igb_disable_sriov(struct pci_dev *pdev) +static int igb_disable_sriov(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); @@ -2435,22 +2718,19 @@ int err = 0; int i; - if (!num_vfs) - goto out; - else if (old_vfs && old_vfs == num_vfs) - goto out; - else if (old_vfs && old_vfs != num_vfs) - err = igb_disable_sriov(pdev); - - if (err) - goto out; - - if (num_vfs 
> 7) { + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { err = -EPERM; goto out; } + if (!num_vfs) + goto out; - adapter->vfs_allocated_count = num_vfs; + if (old_vfs) { + dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", + old_vfs, max_vfs); + adapter->vfs_allocated_count = old_vfs; + } else + adapter->vfs_allocated_count = num_vfs; adapter->vf_data = kcalloc(adapter->vfs_allocated_count, sizeof(struct vf_data_storage), GFP_KERNEL); @@ -2464,10 +2744,12 @@ goto out; } - err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); - if (err) - goto err_out; - + /* only call pci_enable_sriov() if no VFs are allocated already */ + if (!old_vfs) { + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + } dev_info(&pdev->dev, "%d VFs allocated\n", adapter->vfs_allocated_count); for (i = 0; i < adapter->vfs_allocated_count; i++) @@ -2541,15 +2823,15 @@ */ igb_release_hw_control(adapter); - unregister_netdev(netdev); - - igb_clear_interrupt_scheme(adapter); - #ifdef CONFIG_PCI_IOV igb_disable_sriov(pdev); #endif - iounmap(hw->hw_addr); + unregister_netdev(netdev); + + igb_clear_interrupt_scheme(adapter); + + pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); pci_release_selected_regions(pdev, @@ -2582,6 +2864,13 @@ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) return; + /* Of the below we really only want the effect of getting + * IGB_FLAG_HAS_MSIX set (if available), without which + * igb_enable_sriov() has no effect. + */ + igb_set_interrupt_capability(adapter, true); + igb_reset_interrupt_capability(adapter); + pci_sriov_set_totalvfs(pdev, 7); igb_enable_sriov(pdev, max_vfs); @@ -2624,6 +2913,14 @@ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + igb_set_flag_queue_pairs(adapter, max_rss_queues); +} + +void igb_set_flag_queue_pairs(struct igb_adapter *adapter, + const u32 max_rss_queues) +{ + struct e1000_hw *hw = &adapter->hw; + /* Determine if we need to pair queues. */ switch (hw->mac.type) { case e1000_82575: @@ -2704,6 +3001,11 @@ } #endif /* CONFIG_PCI_IOV */ + /* Assume MSI-X interrupts, will be checked during IRQ allocation */ + adapter->flags |= IGB_FLAG_HAS_MSIX; + + igb_probe_vfs(adapter); + igb_init_queue_configuration(adapter); /* Setup and initialize a copy of the hw vlan table array */ @@ -2716,8 +3018,6 @@ return -ENOMEM; } - igb_probe_vfs(adapter); - /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter); @@ -2807,6 +3107,7 @@ /* notify VFs that reset has been completed */ if (adapter->vfs_allocated_count) { u32 reg_data = rd32(E1000_CTRL_EXT); + reg_data |= E1000_CTRL_EXT_PFRSTD; wr32(E1000_CTRL_EXT, reg_data); } @@ -2978,7 +3279,7 @@ * Configure a transmit ring after a reset. 
**/ void igb_configure_tx_ring(struct igb_adapter *adapter, - struct igb_ring *ring) + struct igb_ring *ring) { struct e1000_hw *hw = &adapter->hw; u32 txdctl = 0; @@ -3095,48 +3396,32 @@ { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; - u32 j, num_rx_queues, shift = 0; - static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, - 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, - 0xA32DCB77, 0x0CF23080, 0x3BB7426A, - 0xFA01ACBE }; + u32 j, num_rx_queues; + u32 rss_key[10]; - /* Fill out hash function seeds */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (j = 0; j < 10; j++) - wr32(E1000_RSSRK(j), rsskey[j]); + wr32(E1000_RSSRK(j), rss_key[j]); num_rx_queues = adapter->rss_queues; switch (hw->mac.type) { - case e1000_82575: - shift = 6; - break; case e1000_82576: /* 82576 supports 2 RSS queues for SR-IOV */ - if (adapter->vfs_allocated_count) { - shift = 3; + if (adapter->vfs_allocated_count) num_rx_queues = 2; - } break; default: break; } - /* Populate the indirection table 4 entries at a time. To do this - * we are generating the results for n and n+2 and then interleaving - * those with the results with n+1 and n+3. - */ - for (j = 0; j < 32; j++) { - /* first pass generates n and n+2 */ - u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues; - u32 reta = (base & 0x07800780) >> (7 - shift); - - /* second pass generates n+1 and n+3 */ - base += 0x00010001 * num_rx_queues; - reta |= (base & 0x07800780) << (1 + shift); - - wr32(E1000_RETA(j), reta); + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; } + igb_write_rss_indir_tbl(adapter); /* Disable raw packet checksumming so that RSS hash is placed in * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum @@ -3174,6 +3459,7 @@ if (hw->mac.type > e1000_82575) { /* Set the default pool for the PF's first queue */ u32 vtctl = rd32(E1000_VT_CTL); + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | E1000_VT_CTL_DISABLE_DEF_POOL); vtctl |= adapter->vfs_allocated_count << @@ -3255,7 +3541,7 @@ } static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, - int vfn) + int vfn) { struct e1000_hw *hw = &adapter->hw; u32 vmolr; @@ -3315,6 +3601,13 @@ vmolr = rd32(E1000_VMOLR(vfn)); vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + if (hw->mac.type == e1000_i350) { + u32 dvmolr; + + dvmolr = rd32(E1000_DVMOLR(vfn)); + dvmolr |= E1000_DVMOLR_STRVLAN; + wr32(E1000_DVMOLR(vfn), dvmolr); + } if (aupe) vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ else @@ -3446,7 +3739,8 @@ int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_free_tx_resources(adapter->tx_ring[i]); + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); } void igb_unmap_and_free_tx_resource(struct igb_ring *ring, @@ -3511,7 +3805,8 @@ int i; for (i = 0; i < adapter->num_tx_queues; i++) - igb_clean_tx_ring(adapter->tx_ring[i]); + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); } /** @@ -3548,7 +3843,8 @@ int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_free_rx_resources(adapter->rx_ring[i]); + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); } /** @@ -3603,7 +3899,8 @@ int i; for (i = 0; i < adapter->num_rx_queues; i++) - igb_clean_rx_ring(adapter->rx_ring[i]); + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); } /** @@ -3738,9 +4035,8 @@ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); if (netdev->flags & IFF_PROMISC) { - u32 mrqc = rd32(E1000_MRQC); /* retain VLAN HW filtering if in VT mode */ - if (mrqc & E1000_MRQC_ENABLE_VMDQ) + if (adapter->vfs_allocated_count) rctl |= E1000_RCTL_VFE; rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); @@ -3796,7 +4092,8 @@ switch (hw->mac.type) { case e1000_82576: case e1000_i350: - if (!(wvbr = rd32(E1000_WVBR))) + wvbr = rd32(E1000_WVBR); + if (!wvbr) return; break; default: @@ -3815,7 +4112,7 @@ if (!adapter->wvbr) return; - for(j = 0; j < adapter->vfs_allocated_count; j++) { + for (j = 0; j < adapter->vfs_allocated_count; j++) { if (adapter->wvbr & (1 << j) || adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { dev_warn(&adapter->pdev->dev, @@ -3844,7 +4141,6 @@ { struct e1000_hw *hw = &adapter->hw; bool link_active = false; - s32 ret_val = 0; /* get_link_status is set on LSC (link status) interrupt or * rx sequence error interrupt. 
get_link_status will stay @@ -3853,22 +4149,28 @@ */ switch (hw->phy.media_type) { case e1000_media_type_copper: - if (hw->mac.get_link_status) { - ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - } else { - link_active = true; - } - break; + if (!hw->mac.get_link_status) + return true; case e1000_media_type_internal_serdes: - ret_val = hw->mac.ops.check_for_link(hw); - link_active = hw->mac.serdes_has_link; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; break; default: case e1000_media_type_unknown: break; } + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + return link_active; } @@ -3891,6 +4193,26 @@ } /** + * igb_check_lvmmc - check for malformed packets received + * and indicated in LVMMC register + * @adapter: pointer to adapter + **/ +static void igb_check_lvmmc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 lvmmc; + + lvmmc = rd32(E1000_LVMMC); + if (lvmmc) { + if (unlikely(net_ratelimit())) { + netdev_warn(adapter->netdev, + "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", + lvmmc); + } + } +} + +/** * igb_watchdog - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ @@ -3911,22 +4233,46 @@ struct net_device *netdev = adapter->netdev; u32 link; int i; + u32 connsw; link = igb_has_link(adapter); + + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { + connsw = rd32(E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { + hw->dev_spec._82575.media_changed = false; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } /* Cancel scheduled suspend requests. */ pm_runtime_resume(netdev->dev.parent); if (!netif_carrier_ok(netdev)) { u32 ctrl; + hw->mac.ops.get_speed_and_duplex(hw, &adapter->link_speed, &adapter->link_duplex); ctrl = rd32(E1000_CTRL); /* Links status message must follow this format */ - printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s " - "Duplex, Flow Control: %s\n", + netdev_info(netdev, + "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? @@ -3936,6 +4282,15 @@ (ctrl & E1000_CTRL_RFCE) ? "RX" : (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + /* disable EEE if enabled */ + if ((adapter->flags & IGB_FLAG_EEE) && + (adapter->link_duplex == HALF_DUPLEX)) { + dev_info(&adapter->pdev->dev, + "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex.\n"); + adapter->hw.dev_spec._82575.eee_disable = true; + adapter->flags &= ~IGB_FLAG_EEE; + } + /* check if SmartSpeed worked */ igb_check_downshift(hw); if (phy->speed_downgraded) @@ -3943,11 +4298,8 @@ /* check for thermal sensor event */ if (igb_thermal_sensor_event(hw, - E1000_THSTAT_LINK_THROTTLE)) { - netdev_info(netdev, "The network adapter link " - "speed was downshifted because it " - "overheated\n"); - } + E1000_THSTAT_LINK_THROTTLE)) + netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; @@ -3978,12 +4330,11 @@ /* check for thermal sensor event */ if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) { - netdev_err(netdev, "The network adapter was " - "stopped because it overheated\n"); + netdev_err(netdev, "The network adapter was stopped because it overheated\n"); } /* Links status message must follow this format */ - printk(KERN_INFO "igb: %s NIC Link is Down\n", + netdev_info(netdev, "igb: %s NIC Link is Down\n", netdev->name); netif_carrier_off(netdev); @@ -3994,8 +4345,27 @@ mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } pm_schedule_suspend(netdev->dev.parent, MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } } } @@ -4024,8 +4394,9 @@ } /* Cause software interrupt to ensure Rx ring is cleaned */ - if (adapter->msix_entries) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) { u32 eics = 0; + for (i = 0; i < adapter->num_q_vectors; i++) eics |= adapter->q_vector[i]->eims_value; wr32(E1000_EICS, eics); @@ -4036,10 +4407,20 @@ igb_spoof_check(adapter); igb_ptp_rx_hang(adapter); + /* Check LVMMC register on i350/i354 only */ + if ((adapter->hw.mac.type == e1000_i350) || + (adapter->hw.mac.type == e1000_i354)) + igb_check_lvmmc(adapter); + /* Reset the timer */ - if (!test_bit(__IGB_DOWN, &adapter->state)) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 2 * HZ)); + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } } enum latency_range { @@ -4060,8 +4441,7 @@ * were determined based on theoretical maximum wire speed and testing * data, in order to minimize response time while increasing bulk * throughput. - * This functionality is controlled by the InterruptThrottleRate module - * parameter (see igb_param.c) + * This functionality is controlled by ethtool's coalescing settings. * NOTE: This function is called only when operating in a multiqueue * receive environment. **/ @@ -4135,8 +4515,7 @@ * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. 
- * this functionality is controlled by the InterruptThrottleRate module - * parameter (see igb_param.c) + * This functionality is controlled by ethtool's coalescing settings. * NOTE: These calculations are only valid when operating in a single- * queue environment. **/ @@ -4162,13 +4541,12 @@ case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ - if (bytes/packets > 8000) { + if (bytes/packets > 8000) itrval = bulk_latency; - } else if ((packets < 10) || ((bytes/packets) > 1200)) { + else if ((packets < 10) || ((bytes/packets) > 1200)) itrval = bulk_latency; - } else if ((packets > 35)) { + else if ((packets > 35)) itrval = lowest_latency; - } } else if (bytes/packets > 2000) { itrval = bulk_latency; } else if (packets <= 2 && bytes < 512) { @@ -4284,6 +4662,7 @@ struct sk_buff *skb = first->skb; u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; + int err; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; @@ -4291,16 +4670,14 @@ if (!skb_is_gso(skb)) return 0; - if (skb_header_cloned(skb)) { - int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } + err = skb_cow_head(skb, 0); + if (err < 0) + return err; /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == __constant_htons(ETH_P_IP)) { + if (first->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -4355,13 +4732,14 @@ return; } else { u8 l4_hdr = 0; + switch (first->protocol) { - case __constant_htons(ETH_P_IP): + case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; l4_hdr = ip_hdr(skb)->protocol; break; - case __constant_htons(ETH_P_IPV6): + case htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; break; @@ -4461,6 +4839,41 @@ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + static void igb_tx_map(struct igb_ring *tx_ring, struct igb_tx_buffer *first, const u8 hdr_len) @@ -4563,13 +4976,17 @@ tx_ring->next_to_use = i; - writel(i, tx_ring->tail); + /* Make sure there is space in the ring for the next send. 
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } return; dma_error: @@ -4589,47 +5006,13 @@ tx_ring->next_to_use = i; } -static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) -{ - struct net_device *netdev = tx_ring->netdev; - - netif_stop_subqueue(netdev, tx_ring->queue_index); - - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. - */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. - */ - if (igb_desc_unused(tx_ring) < size) - return -EBUSY; - - /* A reprieve! */ - netif_wake_subqueue(netdev, tx_ring->queue_index); - - u64_stats_update_begin(&tx_ring->tx_syncp2); - tx_ring->tx_stats.restart_queue2++; - u64_stats_update_end(&tx_ring->tx_syncp2); - - return 0; -} - -static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) -{ - if (igb_desc_unused(tx_ring) >= size) - return 0; - return __igb_maybe_stop_tx(tx_ring, size); -} - netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, struct igb_ring *tx_ring) { struct igb_tx_buffer *first; int tso; u32 tx_flags = 0; + unsigned short f; u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = vlan_get_protocol(skb); u8 hdr_len = 0; @@ -4640,13 +5023,8 @@ * + 1 desc for context descriptor, * otherwise try next time */ - if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { - unsigned short f; - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - } else { - count += skb_shinfo(skb)->nr_frags; - } + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); if (igb_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ @@ -4659,12 +5037,11 @@ first->bytecount = skb->len; first->gso_segs = 1; - skb_tx_timestamp(skb); - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!(adapter->ptp_tx_skb)) { + if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; @@ -4675,9 +5052,11 @@ } } - if (vlan_tx_tag_present(skb)) { + skb_tx_timestamp(skb); + + if (skb_vlan_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; - tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); } /* record initial flags and protocol */ @@ -4692,9 +5071,6 @@ igb_tx_map(tx_ring, first, hdr_len); - /* Make sure there is space in the ring for the next send. */ - igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: @@ -4732,12 +5108,8 @@ /* The minimum packet size with TCTL.PSP set is 17 so pad the skb * in order to meet this minimum size requirement. 
*/ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); } @@ -4814,8 +5186,12 @@ return -EINVAL; } + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); /* igb_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; @@ -4847,14 +5223,11 @@ struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; u32 reg, mpc; - u16 phy_tmp; int i; u64 bytes, packets; unsigned int start; u64 _bytes, _packets; -#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF - /* Prevent stats update while adapter is being reset, or if the pci * connection is down. */ @@ -4865,9 +5238,13 @@ bytes = 0; packets = 0; + + rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); if (rqdpc) { ring->rx_stats.drops += rqdpc; @@ -4875,10 +5252,10 @@ } do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); _bytes = ring->rx_stats.bytes; _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -4891,15 +5268,16 @@ for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); _bytes = ring->tx_stats.bytes; _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); bytes += _bytes; packets += _packets; } net_stats->tx_bytes = bytes; net_stats->tx_packets = packets; + rcu_read_unlock(); /* read stats registers */ adapter->stats.crcerrs += rd32(E1000_CRCERRS); @@ -5010,15 +5388,6 @@ /* Tx Dropped needs to be maintained elsewhere */ - /* Phy Stats */ - if (hw->phy.media_type == e1000_media_type_copper) { - if ((adapter->link_speed == SPEED_1000) && - (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { - phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; - adapter->phy_stats.idle_errors += phy_tmp; - } - } - /* Management Stats */ adapter->stats.mgptc += rd32(E1000_MGTPTC); adapter->stats.mgprc += rd32(E1000_MGTPRC); @@ -5034,6 +5403,81 @@ } } +static void igb_tsync_interrupt(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec64 ts; + u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); + + if (tsicr & TSINTR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + else + dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); + ack |= TSINTR_SYS_WRAP; + } + + if (tsicr & E1000_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= E1000_TSICR_TXTS; + } + + if (tsicr & TSINTR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[0].start, + adapter->perout[0].period); + /* u32 
conversion of tv_sec is safe until y2106 */ + wr32(E1000_TRGTTIML0, ts.tv_nsec); + wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= TSINTR_TT0; + } + + if (tsicr & TSINTR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(E1000_TRGTTIML1, ts.tv_nsec); + wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT1; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= TSINTR_TT1; + } + + if (tsicr & TSINTR_AUTT0) { + nsec = rd32(E1000_AUXSTMPL0); + sec = rd32(E1000_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_AUTT0; + } + + if (tsicr & TSINTR_AUTT1) { + nsec = rd32(E1000_AUXSTMPL1); + sec = rd32(E1000_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(E1000_TSICR, ack); +} + static irqreturn_t igb_msix_other(int irq, void *data) { struct igb_adapter *adapter = data; @@ -5065,16 +5509,8 @@ mod_timer(&adapter->watchdog_timer, jiffies + 1); } - if (icr & E1000_ICR_TS) { - u32 tsicr = rd32(E1000_TSICR); - - if (tsicr & E1000_TSICR_TXTS) { - /* acknowledge the interrupt */ - wr32(E1000_TSICR, E1000_TSICR_TXTS); - /* retrieve hardware timestamp */ - schedule_work(&adapter->ptp_tx_work); - } - } + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); wr32(E1000_EIMS, adapter->eims_other); @@ -5291,6 +5727,7 @@ vmolr |= E1000_VMOLR_MPME; } else if (vf_data->num_vf_mc_hashes) { int j; + vmolr |= E1000_VMOLR_ROMPE; for (j = 0; j < vf_data->num_vf_mc_hashes; j++) igb_mta_set(hw, vf_data->vf_mc_hashes[j]); @@ -5342,6 +5779,7 @@ for (i = 0; i < adapter->vfs_allocated_count; i++) { u32 vmolr = rd32(E1000_VMOLR(i)); + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); vf_data = &adapter->vf_data[i]; @@ -5440,6 +5878,7 @@ if (!adapter->vf_data[vf].vlans_enabled) { u32 size; + reg = rd32(E1000_VMOLR(vf)); size = reg & E1000_VMOLR_RLPML_MASK; size += 4; @@ -5468,6 +5907,7 @@ adapter->vf_data[vf].vlans_enabled--; if (!adapter->vf_data[vf].vlans_enabled) { u32 size; + reg = rd32(E1000_VMOLR(vf)); size = reg & E1000_VMOLR_RLPML_MASK; size -= 4; @@ -5572,8 +6012,8 @@ */ if (!add && (adapter->netdev->flags & IFF_PROMISC)) { u32 vlvf, bits; - int regndx = igb_find_vlvf_entry(adapter, vid); + if (regndx < 0) goto out; /* See if any other pools are set for this VLAN filter @@ -5657,8 +6097,12 @@ adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; /* reply to reset with ack and vf mac address */ - msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; - memcpy(addr, vf_mac, 6); + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK; + } igb_write_mbx(hw, msgbuf, 3, vf); } @@ -5849,16 +6293,8 @@ mod_timer(&adapter->watchdog_timer, jiffies + 1); } - if (icr & E1000_ICR_TS) { - u32 tsicr = rd32(E1000_TSICR); - - if (tsicr & E1000_TSICR_TXTS) { - /* acknowledge the interrupt */ - wr32(E1000_TSICR, E1000_TSICR_TXTS); - /* retrieve hardware timestamp */ - 
@@ -5291,6 +5727,7 @@
 		vmolr |= E1000_VMOLR_MPME;
 	} else if (vf_data->num_vf_mc_hashes) {
 		int j;
+
 		vmolr |= E1000_VMOLR_ROMPE;
 		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
 			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5342,6 +5779,7 @@
 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
 		u32 vmolr = rd32(E1000_VMOLR(i));
+
 		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
 
 		vf_data = &adapter->vf_data[i];
@@ -5440,6 +5878,7 @@
 		if (!adapter->vf_data[vf].vlans_enabled) {
 			u32 size;
+
 			reg = rd32(E1000_VMOLR(vf));
 			size = reg & E1000_VMOLR_RLPML_MASK;
 			size += 4;
@@ -5468,6 +5907,7 @@
 			adapter->vf_data[vf].vlans_enabled--;
 		if (!adapter->vf_data[vf].vlans_enabled) {
 			u32 size;
+
 			reg = rd32(E1000_VMOLR(vf));
 			size = reg & E1000_VMOLR_RLPML_MASK;
 			size -= 4;
@@ -5572,8 +6012,8 @@
 	 */
 	if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
 		u32 vlvf, bits;
-
 		int regndx = igb_find_vlvf_entry(adapter, vid);
+
 		if (regndx < 0)
 			goto out;
 
 		/* See if any other pools are set for this VLAN filter
@@ -5657,8 +6097,12 @@
 	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
 
 	/* reply to reset with ack and vf mac address */
-	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
-	memcpy(addr, vf_mac, 6);
+	if (!is_zero_ether_addr(vf_mac)) {
+		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+		memcpy(addr, vf_mac, ETH_ALEN);
+	} else {
+		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
+	}
 	igb_write_mbx(hw, msgbuf, 3, vf);
 }
@@ -5849,16 +6293,8 @@
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (icr & E1000_ICR_TS) {
-		u32 tsicr = rd32(E1000_TSICR);
-
-		if (tsicr & E1000_TSICR_TXTS) {
-			/* acknowledge the interrupt */
-			wr32(E1000_TSICR, E1000_TSICR_TXTS);
-			/* retrieve hardware timestamp */
-			schedule_work(&adapter->ptp_tx_work);
-		}
-	}
+	if (icr & E1000_ICR_TS)
+		igb_tsync_interrupt(adapter);
 
 	napi_schedule(&q_vector->napi);
@@ -5903,16 +6339,8 @@
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (icr & E1000_ICR_TS) {
-		u32 tsicr = rd32(E1000_TSICR);
-
-		if (tsicr & E1000_TSICR_TXTS) {
-			/* acknowledge the interrupt */
-			wr32(E1000_TSICR, E1000_TSICR_TXTS);
-			/* retrieve hardware timestamp */
-			schedule_work(&adapter->ptp_tx_work);
-		}
-	}
+	if (icr & E1000_ICR_TS)
+		igb_tsync_interrupt(adapter);
 
 	napi_schedule(&q_vector->napi);
@@ -5933,7 +6361,7 @@
 	}
 
 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
-		if (adapter->msix_entries)
+		if (adapter->flags & IGB_FLAG_HAS_MSIX)
 			wr32(E1000_EIMS, q_vector->eims_value);
 		else
 			igb_irq_enable(adapter);
@@ -5951,6 +6379,7 @@
 						     struct igb_q_vector,
 						     napi);
 	bool clean_complete = true;
+	int work_done = 0;
 
 #ifdef CONFIG_IGB_DCA
 	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -5959,15 +6388,19 @@
 	if (q_vector->tx.ring)
 		clean_complete = igb_clean_tx_irq(q_vector);
 
-	if (q_vector->rx.ring)
-		clean_complete &= igb_clean_rx_irq(q_vector, budget);
+	if (q_vector->rx.ring) {
+		int cleaned = igb_clean_rx_irq(q_vector, budget);
+
+		work_done += cleaned;
+		clean_complete &= (cleaned < budget);
+	}
 
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
 
 	/* If not enough Rx work done, exit the polling mode */
-	napi_complete(napi);
+	napi_complete_done(napi, work_done);
 	igb_ring_irq_enable(q_vector);
 
 	return 0;
@@ -6018,7 +6451,7 @@
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		dev_kfree_skb_any(tx_buffer->skb);
+		dev_consume_skb_any(tx_buffer->skb);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
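With igb_clean_rx_irq() now returning a packet count, igb_poll() above accumulates work_done and reports it through napi_complete_done(). A skeleton of that convention, using hypothetical demo_* types and helpers rather than the driver's own:

/* Sketch: Rx cleanup reports packets consumed; the queue stays in
 * polling mode whenever the full budget was used.
 */
struct demo_q_vector {
	struct napi_struct napi;
	/* ... rings, adapter back-pointer ... */
};

extern bool demo_clean_tx(struct demo_q_vector *q);
extern int demo_clean_rx(struct demo_q_vector *q, int budget);
extern void demo_irq_enable(struct demo_q_vector *q);

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_q_vector *q = container_of(napi, struct demo_q_vector,
					       napi);
	bool clean_complete = demo_clean_tx(q);
	int work_done = demo_clean_rx(q, budget);

	clean_complete &= (work_done < budget);
	if (!clean_complete)
		return budget;			/* keep polling */

	napi_complete_done(napi, work_done);	/* feeds GRO flush logic */
	demo_irq_enable(q);			/* re-arm the interrupt */
	return 0;
}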
@@ -6164,7 +6597,7 @@
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
 	/* transfer page from old buffer to new buffer */
-	memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+	*new_buff = *old_buff;
 
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -6173,12 +6606,17 @@
 					 DMA_FROM_DEVICE);
 }
 
+static inline bool igb_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 				  struct page *page,
 				  unsigned int truesize)
 {
 	/* avoid re-using remote pages */
-	if (unlikely(page_to_nid(page) != numa_node_id()))
+	if (unlikely(igb_page_is_reserved(page)))
 		return false;
 
 #if (PAGE_SIZE < 8192)
@@ -6188,23 +6626,19 @@
 
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
-
-	/* since we are the only owner of the page and we need to
-	 * increment it, just set the value to 2 in order to avoid
-	 * an unnecessary locked operation
-	 */
-	atomic_set(&page->_count, 2);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 
 	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
 		return false;
-
-	/* bump ref count on page before it is given to the stack */
-	get_page(page);
 #endif
 
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	atomic_inc(&page->_count);
+
 	return true;
 }
@@ -6229,35 +6663,51 @@
 			    struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IGB_RX_BUFSZ;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
 
-		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-			igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-			va += IGB_TS_HDR_LEN;
-			size -= IGB_TS_HDR_LEN;
-		}
+	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+		va += IGB_TS_HDR_LEN;
+		size -= IGB_TS_HDR_LEN;
+	}
 
+	if (likely(size <= IGB_RX_HDR_LEN)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-		/* we can reuse buffer as-is, just make sure it is local */
-		if (likely(page_to_nid(page) == numa_node_id()))
+		/* page is not reserved, we can reuse buffer as-is */
+		if (likely(!igb_page_is_reserved(page)))
 			return true;
 
 		/* this page cannot be reused so discard it */
-		put_page(page);
+		__free_page(page);
 		return false;
 	}
 
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
@@ -6270,7 +6720,6 @@
 	struct page *page;
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-
 	page = rx_buffer->page;
 	prefetchw(page);
@@ -6285,8 +6734,7 @@
 #endif
 
 	/* allocate a skb to store the frags */
-	skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-					IGB_RX_HDR_LEN);
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
 	if (unlikely(!skb)) {
 		rx_ring->rx_stats.alloc_failed++;
 		return NULL;
 	}
@@ -6367,7 +6815,9 @@
 			      struct sk_buff *skb)
 {
 	if (ring->netdev->features & NETIF_F_RXHASH)
-		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+		skb_set_hash(skb,
+			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+			     PKT_HASH_TYPE_L3);
 }
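The reuse path above keeps each receive page alive by flipping page_offset between the two half-page buffers and taking one reference per use. A standalone demo of just the offset arithmetic; the constant stands in for IGB_RX_BUFSZ on a 4 KiB page:

/* Demo: XOR-ing the offset with the buffer size ping-pongs between
 * the two 2 KiB halves of one 4 KiB page, halving page allocations.
 */
#include <stdio.h>

#define DEMO_RX_BUFSZ 2048u	/* stands in for IGB_RX_BUFSZ */

int main(void)
{
	unsigned int offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("receive %d uses page offset %u\n", i, offset);
		offset ^= DEMO_RX_BUFSZ;	/* flip to the other half */
	}
	return 0;
}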
@@ -6399,169 +6849,6 @@
 }
 
 /**
- * igb_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, and GRO offloads.  The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int igb_get_headlen(unsigned char *data,
-				    unsigned int max_len)
-{
-	union {
-		unsigned char *network;
-		/* l2 headers */
-		struct ethhdr *eth;
-		struct vlan_hdr *vlan;
-		/* l3 headers */
-		struct iphdr *ipv4;
-		struct ipv6hdr *ipv6;
-	} hdr;
-	__be16 protocol;
-	u8 nexthdr = 0;	/* default to not TCP */
-	u8 hlen;
-
-	/* this should never happen, but better safe than sorry */
-	if (max_len < ETH_HLEN)
-		return max_len;
-
-	/* initialize network frame pointer */
-	hdr.network = data;
-
-	/* set first protocol and move network header forward */
-	protocol = hdr.eth->h_proto;
-	hdr.network += ETH_HLEN;
-
-	/* handle any vlan tag if present */
-	if (protocol == __constant_htons(ETH_P_8021Q)) {
-		if ((hdr.network - data) > (max_len - VLAN_HLEN))
-			return max_len;
-
-		protocol = hdr.vlan->h_vlan_encapsulated_proto;
-		hdr.network += VLAN_HLEN;
-	}
-
-	/* handle L3 protocols */
-	if (protocol == __constant_htons(ETH_P_IP)) {
-		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
-			return max_len;
-
-		/* access ihl as a u8 to avoid unaligned access on ia64 */
-		hlen = (hdr.network[0] & 0x0F) << 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct iphdr))
-			return hdr.network - data;
-
-		/* record next protocol if header is present */
-		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
-			nexthdr = hdr.ipv4->protocol;
-	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
-		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
-			return max_len;
-
-		/* record next protocol */
-		nexthdr = hdr.ipv6->nexthdr;
-		hlen = sizeof(struct ipv6hdr);
-	} else {
-		return hdr.network - data;
-	}
-
-	/* relocate pointer to start of L4 header */
-	hdr.network += hlen;
-
-	/* finally sort out TCP */
-	if (nexthdr == IPPROTO_TCP) {
-		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
-			return max_len;
-
-		/* access doff as a u8 to avoid unaligned access on ia64 */
-		hlen = (hdr.network[12] & 0xF0) >> 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct tcphdr))
-			return hdr.network - data;
-
-		hdr.network += hlen;
-	} else if (nexthdr == IPPROTO_UDP) {
-		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
-			return max_len;
-
-		hdr.network += sizeof(struct udphdr);
-	}
-
-	/* If everything has gone correctly hdr.network should be the
-	 * data section of the packet and will be the end of the header.
-	 * If not then it probably represents the end of the last recognized
-	 * header.
-	 */
-	if ((hdr.network - data) < max_len)
-		return hdr.network - data;
-	else
-		return max_len;
-}
-
-/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
- *
- * This function is an igb specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
-			  union e1000_adv_rx_desc *rx_desc,
-			  struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		/* retrieve timestamp from buffer */
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
-		/* update pointers to remove timestamp header */
-		skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
-		frag->page_offset += IGB_TS_HDR_LEN;
-		skb->data_len -= IGB_TS_HDR_LEN;
-		skb->len -= IGB_TS_HDR_LEN;
-
-		/* move va to start of packet data */
-		va += IGB_TS_HDR_LEN;
-	}
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
-/**
  * igb_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
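The driver-private igb_get_headlen() removed above is superseded by the generic eth_get_headlen() call in igb_add_rx_frag(), and the pulled header is still copied rounded up to a long boundary. A standalone demo of that ALIGN arithmetic (the header lengths are illustrative):

/* Demo: round a pull length up to sizeof(long) before memcpy,
 * as ALIGN(pull_len, sizeof(long)) does in the driver.
 */
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int lens[] = { 14, 34, 54, 66 };	/* L2..L4 headers */
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("pull_len %u -> copy %lu bytes\n", lens[i],
		       (unsigned long)ALIGN_UP(lens[i], sizeof(long)));
	return 0;
}

On a 64-bit host each copy length becomes a multiple of 8, which keeps the memcpy on word boundaries at the cost of a few slack bytes in the (already padded) linear area.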
@@ -6588,18 +6875,9 @@
 		}
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		igb_pull_tail(rx_ring, rx_desc, skb);
-
-	/* if skb_pad returns an error the skb was freed */
-	if (unlikely(skb->len < 60)) {
-		int pad_len = 60 - skb->len;
-
-		if (skb_pad(skb, pad_len))
-			return true;
-		__skb_put(skb, pad_len);
-	}
+	/* if eth_skb_pad returns an error the skb was freed */
+	if (eth_skb_pad(skb))
+		return true;
 
 	return false;
 }
@@ -6624,11 +6902,14 @@
 	igb_rx_checksum(rx_ring, rx_desc, skb);
 
-	igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
 
 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
 		u16 vid;
+
 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
 			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -6643,14 +6924,14 @@
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
 	struct igb_ring *rx_ring = q_vector->rx.ring;
 	struct sk_buff *skb = rx_ring->skb;
 	unsigned int total_bytes = 0, total_packets = 0;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
 
-	do {
+	while (likely(total_packets < budget)) {
 		union e1000_adv_rx_desc *rx_desc;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -6661,14 +6942,14 @@
 
 		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+		if (!rx_desc->wb.upper.status_error)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
 		 * any other fields out of the rx_desc until we know the
-		 * RXD_STAT_DD bit is set
+		 * descriptor has been written back
 		 */
-		rmb();
+		dma_rmb();
 
 		/* retrieve a buffer from the ring */
 		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
@@ -6702,7 +6983,7 @@
 		/* update budget accounting */
 		total_packets++;
-	} while (likely(total_packets < budget));
+	}
 
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
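The Rx loop above now keys off the raw status_error writeback word and fences subsequent descriptor reads with dma_rmb(). In isolation the consumer contract looks like the following sketch; demo_desc is a hypothetical layout, not the e1000 advanced descriptor:

/* Sketch: the device writes the status word last, so the CPU checks
 * it first and orders all other field reads behind dma_rmb().
 */
#include <linux/types.h>

struct demo_desc {
	__le16 length;
	__le32 status_error;	/* non-zero once written back */
};

extern void demo_process(u16 len);

static bool demo_consume(struct demo_desc *desc)
{
	if (!READ_ONCE(desc->status_error))
		return false;		/* not written back yet */

	dma_rmb();			/* status check before payload reads */

	demo_process(le16_to_cpu(desc->length));
	return true;
}

dma_rmb() is cheaper than a full rmb() on architectures where coherent DMA only needs a lightweight barrier, which is why the conversion is worthwhile in a per-packet path.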
@@ -6717,7 +6998,7 @@
 	if (cleaned_count)
 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
-	return (total_packets < budget);
+	return total_packets;
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -6731,7 +7012,7 @@
 		return true;
 
 	/* alloc new page for storage */
-	page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+	page = dev_alloc_page();
 	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_failed++;
 		return false;
 	}
@@ -6793,8 +7074,8 @@
 			i -= rx_ring->count;
 		}
 
-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->read.hdr_addr = 0;
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.upper.status_error = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
@@ -6838,7 +7119,7 @@
 		break;
 	case SIOCGMIIREG:
 		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
-		                     &data->val_out))
+				     &data->val_out))
 			return -EIO;
 		break;
 	case SIOCSMIIREG:
@@ -6861,13 +7142,29 @@
 	case SIOCGMIIREG:
 	case SIOCSMIIREG:
 		return igb_mii_ioctl(netdev, ifr, cmd);
+	case SIOCGHWTSTAMP:
+		return igb_ptp_get_ts_config(netdev, ifr);
 	case SIOCSHWTSTAMP:
-		return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+		return igb_ptp_set_ts_config(netdev, ifr);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
 {
 	struct igb_adapter *adapter = hw->back;
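The ioctl hunk above wires up SIOCGHWTSTAMP, so userspace can read back the current hardware timestamping configuration without changing it. A minimal userspace sketch; the interface name "eth0" is a placeholder, and the request needs a kernel with SIOCGHWTSTAMP support:

/* Query (not set) the NIC's hardware timestamping configuration. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	else
		perror("SIOCGHWTSTAMP");

	close(fd);
	return 0;
}

Unlike SIOCSHWTSTAMP, the get variant has no side effects, so monitoring tools can poll it without disturbing a running PTP session.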
@@ -7131,6 +7428,8 @@
 	pci_restore_state(pdev);
 	pci_save_state(pdev);
 
+	if (!pci_device_is_present(pdev))
+		return -ENODEV;
 	err = pci_enable_device_mem(pdev);
 	if (err) {
 		dev_err(&pdev->dev,
@@ -7144,6 +7443,7 @@
 
 	if (igb_init_interrupt_scheme(adapter, true)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		rtnl_unlock();
 		return -ENOMEM;
 	}
@@ -7168,7 +7468,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM_RUNTIME
 static int igb_runtime_idle(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -7205,8 +7504,7 @@
 {
 	return igb_resume(dev);
 }
-#endif /* CONFIG_PM_RUNTIME */
-#endif
+#endif /* CONFIG_PM */
 
 static void igb_shutdown(struct pci_dev *pdev)
 {
@@ -7239,6 +7537,7 @@
 	igb_init_queue_configuration(adapter);
 
 	if (igb_init_interrupt_scheme(adapter, true)) {
+		rtnl_unlock();
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
@@ -7302,7 +7601,7 @@
 	for (i = 0; i < adapter->num_q_vectors; i++) {
 		q_vector = adapter->q_vector[i];
-		if (adapter->msix_entries)
+		if (adapter->flags & IGB_FLAG_HAS_MSIX)
 			wr32(E1000_EIMC, q_vector->eims_value);
 		else
 			igb_irq_disable(adapter);
@@ -7539,7 +7838,8 @@
 	}
 }
 
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+			     int min_tx_rate, int max_tx_rate)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -7548,15 +7848,19 @@
 	if (hw->mac.type != e1000_82576)
 		return -EOPNOTSUPP;
 
+	if (min_tx_rate)
+		return -EINVAL;
+
 	actual_link_speed = igb_link_mbps(adapter->link_speed);
 	if ((vf >= adapter->vfs_allocated_count) ||
 	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
-	    (tx_rate < 0) || (tx_rate > actual_link_speed))
+	    (max_tx_rate < 0) ||
+	    (max_tx_rate > actual_link_speed))
 		return -EINVAL;
 
 	adapter->vf_rate_link_speed = actual_link_speed;
-	adapter->vf_data[vf].tx_rate = (u16)tx_rate;
-	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
 
 	return 0;
 }
@@ -7585,7 +7889,7 @@
 	wr32(reg_offset, reg_val);
 
 	adapter->vf_data[vf].spoofchk_enabled = setting;
-	return E1000_SUCCESS;
+	return 0;
 }
 
 static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -7596,7 +7900,8 @@
 		return -EINVAL;
 	ivi->vf = vf;
 	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
-	ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+	ivi->min_tx_rate = 0;
 	ivi->vlan = adapter->vf_data[vf].pf_vlan;
 	ivi->qos = adapter->vf_data[vf].pf_qos;
 	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
@@ -7621,11 +7926,13 @@
 		reg = rd32(E1000_DTXCTL);
 		reg |= E1000_DTXCTL_VLAN_ADDED;
 		wr32(E1000_DTXCTL, reg);
+		/* Fall through */
 	case e1000_82580:
 		/* enable replication vlan tag stripping */
 		reg = rd32(E1000_RPLOLR);
 		reg |= E1000_RPLOLR_STRVLAN;
 		wr32(E1000_RPLOLR, reg);
+		/* Fall through */
 	case e1000_i350:
 		/* none of the above registers are supported by i350 */
 		break;
@@ -7715,6 +8022,7 @@
 		} /* endif adapter->dmac is not disabled */
 	} else if (hw->mac.type == e1000_82580) {
 		u32 reg = rd32(E1000_PCIEMISC);
+
 		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
 		wr32(E1000_DMACR, 0);
 	}
@@ -7743,8 +8051,7 @@
 
 	swfw_mask = E1000_SWFW_PHY0_SM;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
-	    != E1000_SUCCESS)
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
 		return E1000_ERR_SWFW_SYNC;
 
 	status = i2c_smbus_read_byte_data(this_client, byte_offset);
@@ -7754,7 +8061,7 @@
 		return E1000_ERR_I2C;
 	else {
 		*data = status;
-		return E1000_SUCCESS;
+		return 0;
 	}
 }
@@ -7779,7 +8086,7 @@
 	if (!this_client)
 		return E1000_ERR_I2C;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
 		return E1000_ERR_SWFW_SYNC;
 	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
@@ -7787,7 +8094,29 @@
 	if (status)
 		return E1000_ERR_I2C;
 	else
-		return E1000_SUCCESS;
+		return 0;
 }
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	if (netif_running(netdev))
+		igb_close(netdev);
+
+	igb_reset_interrupt_capability(adapter);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	if (netif_running(netdev))
+		err = igb_open(netdev);
+
+	return err;
+}
 /* igb_main.c */
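For reference, the argument policy enforced by the reworked igb_ndo_set_vf_bw() above, reduced to a standalone check (illustrative only; the 82576 supports no minimum rate, and the cap must fit the negotiated link speed):

/* Demo of the VF rate-limit validation, outside the driver. */
#include <errno.h>
#include <stdio.h>

static int demo_check_vf_bw(int min_tx_rate, int max_tx_rate, int link_mbps)
{
	if (min_tx_rate)
		return -EINVAL;	/* min rate unsupported by the hardware */
	if (max_tx_rate < 0 || max_tx_rate > link_mbps)
		return -EINVAL;	/* cap must not exceed link speed */
	return 0;
}

int main(void)
{
	printf("%d\n", demo_check_vf_bw(0, 500, 1000));		/* 0: ok */
	printf("%d\n", demo_check_vf_bw(100, 500, 1000));	/* -EINVAL */
	printf("%d\n", demo_check_vf_bw(0, 2000, 1000));	/* -EINVAL */
	return 0;
}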