--- zzzz-none-000/linux-3.10.107/drivers/net/ethernet/amd/pcnet32.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/net/ethernet/amd/pcnet32.c	2021-02-04 17:41:59.000000000 +0000
@@ -61,7 +61,7 @@
 /*
  * PCI device identifiers for "new style" Linux PCI Device Drivers
  */
-static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
+static const struct pci_device_id pcnet32_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
 
@@ -481,37 +481,32 @@
 	dma_addr_t *new_dma_addr_list;
 	struct pcnet32_tx_head *new_tx_ring;
 	struct sk_buff **new_skb_list;
+	unsigned int entries = BIT(size);
 
 	pcnet32_purge_tx_ring(dev);
 
-	new_tx_ring = pci_alloc_consistent(lp->pci_dev,
-					   sizeof(struct pcnet32_tx_head) *
-					   (1 << size),
-					   &new_ring_dma_addr);
-	if (new_tx_ring == NULL) {
-		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+	new_tx_ring =
+		pci_zalloc_consistent(lp->pci_dev,
+				      sizeof(struct pcnet32_tx_head) * entries,
+				      &new_ring_dma_addr);
+	if (new_tx_ring == NULL)
 		return;
-	}
-	memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
 
-	new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
-				    GFP_ATOMIC);
+	new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
 	if (!new_dma_addr_list)
 		goto free_new_tx_ring;
 
-	new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
-			       GFP_ATOMIC);
+	new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
 	if (!new_skb_list)
 		goto free_new_lists;
 
 	kfree(lp->tx_skbuff);
 	kfree(lp->tx_dma_addr);
 	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_tx_head) *
-			    lp->tx_ring_size, lp->tx_ring,
-			    lp->tx_ring_dma_addr);
+			    sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+			    lp->tx_ring, lp->tx_ring_dma_addr);
 
-	lp->tx_ring_size = (1 << size);
+	lp->tx_ring_size = entries;
 	lp->tx_mod_mask = lp->tx_ring_size - 1;
 	lp->tx_len_bits = (size << 12);
 	lp->tx_ring = new_tx_ring;
@@ -524,8 +519,7 @@
 	kfree(new_dma_addr_list);
 free_new_tx_ring:
 	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_tx_head) *
-			    (1 << size),
+			    sizeof(struct pcnet32_tx_head) * entries,
 			    new_tx_ring,
 			    new_ring_dma_addr);
 }
@@ -549,35 +543,32 @@
 	struct pcnet32_rx_head *new_rx_ring;
 	struct sk_buff **new_skb_list;
 	int new, overlap;
+	unsigned int entries = BIT(size);
 
-	new_rx_ring = pci_alloc_consistent(lp->pci_dev,
-					   sizeof(struct pcnet32_rx_head) *
-					   (1 << size),
-					   &new_ring_dma_addr);
-	if (new_rx_ring == NULL) {
-		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+	new_rx_ring =
+		pci_zalloc_consistent(lp->pci_dev,
+				      sizeof(struct pcnet32_rx_head) * entries,
+				      &new_ring_dma_addr);
+	if (new_rx_ring == NULL)
 		return;
-	}
-	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
 
-	new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+	new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
 	if (!new_dma_addr_list)
 		goto free_new_rx_ring;
 
-	new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
-			       GFP_ATOMIC);
+	new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
 	if (!new_skb_list)
 		goto free_new_lists;
 
 	/* first copy the current receive buffers */
-	overlap = min(size, lp->rx_ring_size);
+	overlap = min(entries, lp->rx_ring_size);
 	for (new = 0; new < overlap; new++) {
 		new_rx_ring[new] = lp->rx_ring[new];
 		new_dma_addr_list[new] = lp->rx_dma_addr[new];
 		new_skb_list[new] = lp->rx_skbuff[new];
 	}
 	/* now allocate any new buffers needed */
-	for (; new < size; new++) {
+	for (; new < entries; new++) {
 		struct sk_buff *rx_skbuff;
 		new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
 		rx_skbuff = new_skb_list[new];
@@ -592,6 +583,13 @@
 		new_dma_addr_list[new] =
 			    pci_map_single(lp->pci_dev, rx_skbuff->data,
 					   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(lp->pci_dev,
+					  new_dma_addr_list[new])) {
+			netif_err(lp, drv, dev, "%s dma mapping failed\n",
+				  __func__);
+			dev_kfree_skb(new_skb_list[new]);
+			goto free_all_new;
+		}
 		new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
 		new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 		new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +597,12 @@
 	/* and free any unneeded buffers */
 	for (; new < lp->rx_ring_size; new++) {
 		if (lp->rx_skbuff[new]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->rx_dma_addr[new]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[new],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(lp->rx_skbuff[new]);
 		}
 	}
@@ -612,7 +614,7 @@
 			    lp->rx_ring_size, lp->rx_ring,
 			    lp->rx_ring_dma_addr);
 
-	lp->rx_ring_size = (1 << size);
+	lp->rx_ring_size = entries;
 	lp->rx_mod_mask = lp->rx_ring_size - 1;
 	lp->rx_len_bits = (size << 4);
 	lp->rx_ring = new_rx_ring;
@@ -624,8 +626,12 @@
 free_all_new:
 	while (--new >= lp->rx_ring_size) {
 		if (new_skb_list[new]) {
-			pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   new_dma_addr_list[new]))
+				pci_unmap_single(lp->pci_dev,
+						 new_dma_addr_list[new],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(new_skb_list[new]);
 		}
 	}
@@ -634,8 +640,7 @@
 	kfree(new_dma_addr_list);
 free_new_rx_ring:
 	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_rx_head) *
-			    (1 << size),
+			    sizeof(struct pcnet32_rx_head) * entries,
 			    new_rx_ring,
 			    new_ring_dma_addr);
 }
@@ -650,8 +655,12 @@
 		lp->rx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();		/* Make sure adapter sees owner change */
 		if (lp->rx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->rx_dma_addr[i]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[i],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb_any(lp->rx_skbuff[i]);
 		}
 		lp->rx_skbuff[i] = NULL;
@@ -930,6 +939,12 @@
 		lp->tx_dma_addr[x] =
 			pci_map_single(lp->pci_dev, skb->data, skb->len,
 				       PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+			netif_printk(lp, hw, KERN_DEBUG, dev,
+				     "DMA mapping error at line: %d!\n",
+				     __LINE__);
+			goto clean_up;
+		}
 		lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
 		wmb();	/* Make sure owner changes after all others are visible */
 		lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1157,36 @@
 
 	if (pkt_len > rx_copybreak) {
 		struct sk_buff *newskb;
+		dma_addr_t new_dma_addr;
 
 		newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+		/*
+		 * map the new buffer, if mapping fails, drop the packet and
+		 * reuse the old buffer
+		 */
 		if (newskb) {
 			skb_reserve(newskb, NET_IP_ALIGN);
-			skb = lp->rx_skbuff[entry];
-			pci_unmap_single(lp->pci_dev,
-					 lp->rx_dma_addr[entry],
-					 PKT_BUF_SIZE,
-					 PCI_DMA_FROMDEVICE);
-			skb_put(skb, pkt_len);
-			lp->rx_skbuff[entry] = newskb;
-			lp->rx_dma_addr[entry] =
-			    pci_map_single(lp->pci_dev,
-					   newskb->data,
-					   PKT_BUF_SIZE,
-					   PCI_DMA_FROMDEVICE);
-			rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
-			rx_in_place = 1;
+			new_dma_addr = pci_map_single(lp->pci_dev,
+						      newskb->data,
+						      PKT_BUF_SIZE,
+						      PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+				netif_err(lp, rx_err, dev,
+					  "DMA mapping error.\n");
+				dev_kfree_skb(newskb);
+				skb = NULL;
+			} else {
+				skb = lp->rx_skbuff[entry];
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[entry],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
+				skb_put(skb, pkt_len);
+				lp->rx_skbuff[entry] = newskb;
+				lp->rx_dma_addr[entry] = new_dma_addr;
+				rxp->base = cpu_to_le32(new_dma_addr);
+				rx_in_place = 1;
+			}
 		} else
 			skb = NULL;
 	} else
@@ -1473,10 +1500,11 @@
 		return -ENODEV;
 	}
 
-	if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
+	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
+	if (err) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-		return -ENODEV;
+		return err;
 	}
 	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
@@ -1521,7 +1549,7 @@
 	char *chipname;
 	struct net_device *dev;
 	const struct pcnet32_access *a = NULL;
-	u8 promaddr[6];
+	u8 promaddr[ETH_ALEN];
 	int ret = -ENODEV;
 
 	/* reset the chip */
@@ -1692,23 +1720,23 @@
 	}
 
 	/* read PROM address and compare with CSR address */
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		promaddr[i] = inb(ioaddr + i);
 
-	if (memcmp(promaddr, dev->dev_addr, 6) ||
+	if (!ether_addr_equal(promaddr, dev->dev_addr) ||
 	    !is_valid_ether_addr(dev->dev_addr)) {
 		if (is_valid_ether_addr(promaddr)) {
 			if (pcnet32_debug & NETIF_MSG_PROBE) {
 				pr_cont(" warning: CSR address invalid,\n");
 				pr_info("    using instead PROM address of");
 			}
-			memcpy(dev->dev_addr, promaddr, 6);
+			memcpy(dev->dev_addr, promaddr, ETH_ALEN);
 		}
 	}
 
 	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
 	if (!is_valid_ether_addr(dev->dev_addr))
-		memset(dev->dev_addr, 0, ETH_ALEN);
+		eth_zero_addr(dev->dev_addr);
 
 	if (pcnet32_debug & NETIF_MSG_PROBE) {
 		pr_cont(" %pM", dev->dev_addr);
@@ -2256,9 +2284,12 @@
 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();	/* Make sure adapter sees owner change */
 		if (lp->tx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
-					 lp->tx_skbuff[i]->len,
-					 PCI_DMA_TODEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->tx_dma_addr[i]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->tx_dma_addr[i],
+						 lp->tx_skbuff[i]->len,
+						 PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(lp->tx_skbuff[i]);
 		}
 		lp->tx_skbuff[i] = NULL;
@@ -2291,10 +2322,19 @@
 		}
 
 		rmb();
-		if (lp->rx_dma_addr[i] == 0)
+		if (lp->rx_dma_addr[i] == 0) {
 			lp->rx_dma_addr[i] =
 				pci_map_single(lp->pci_dev, rx_skbuff->data,
 					       PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(lp->pci_dev,
+						  lp->rx_dma_addr[i])) {
+				/* there is not much we can do at this point */
+				netif_err(lp, drv, dev,
+					  "%s pci dma mapping error\n",
+					  __func__);
+				return -1;
+			}
+		}
 		lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
 		lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 		wmb();	/* Make sure owner changes after all others are visible */
@@ -2424,9 +2464,14 @@
 
 	lp->tx_ring[entry].misc = 0x00000000;
 
-	lp->tx_skbuff[entry] = skb;
 	lp->tx_dma_addr[entry] =
 		pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
+		goto drop_packet;
+	}
+	lp->tx_skbuff[entry] = skb;
 	lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
 	wmb();	/* Make sure owner changes after all others are visible */
 	lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2441,6 +2486,7 @@
 		lp->tx_full = 1;
 		netif_stop_queue(dev);
 	}
+drop_packet: spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; } @@ -2788,7 +2834,7 @@ /* * Check for loss of link and link establishment. - * Can not use mii_check_media because it does nothing if mode is forced. + * Could possibly be changed to use mii_check_media instead. */ static void pcnet32_watchdog(struct net_device *dev) @@ -2845,7 +2891,6 @@ lp->init_block, lp->init_dma_addr); free_netdev(dev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } }