--- zzzz-none-000/linux-3.10.107/drivers/net/usb/usbnet.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/net/usb/usbnet.c	2021-02-04 17:41:59.000000000 +0000
@@ -14,8 +14,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
@@ -47,6 +46,12 @@
 #include <linux/kernel.h>
 #include <linux/pm_runtime.h>
+#ifdef CONFIG_AVM_PA
+#include <linux/avm_pa.h>
+#endif
+
+#include 
+
 
 #define DRIVER_VERSION		"22-Aug-2005"
 
 
@@ -59,21 +64,20 @@
  * For high speed, each frame comfortably fits almost 36 max size
  * Ethernet packets (so queues should be bigger).
  *
- * REVISIT qlens should be members of 'struct usbnet'; the goal is to
- * let the USB host controller be busy for 5msec or more before an irq
- * is required, under load. Jumbograms change the equation.
+ * The goal is to let the USB host controller be busy for 5msec or
+ * more before an irq is required, under load. Jumbograms change
+ * the equation.
  */
-#define RX_MAX_QUEUE_MEMORY (60 * 1518)
-#define	RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
-	(RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
-#define	TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
-	(RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
+#define MAX_QUEUE_MEMORY (60 * 1518)
+#define	RX_QLEN(dev)		((dev)->rx_qlen)
+#define	TX_QLEN(dev)		((dev)->tx_qlen)
 
 // reawaken network queue this soon after stopping; else watchdog barks
 #define TX_TIMEOUT_JIFFIES	(5*HZ)
 
-// throttle rx/tx briefly after some faults, so khubd might disconnect()
-// us (it polls at HZ/4 usually) before we report too many false errors.
+/* throttle rx/tx briefly after some faults, so hub_wq might disconnect()
+ * us (it polls at HZ/4 usually) before we report too many false errors.
+ */
 #define THROTTLE_JIFFIES	(HZ/8)
 
 // between wakeups
@@ -162,20 +166,19 @@
 
 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
 {
-	int		tmp, i;
+	int		tmp = -1, ret;
 	unsigned char	buf [13];
 
-	tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
-	if (tmp != 12) {
+	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
+	if (ret == 12)
+		tmp = hex2bin(dev->net->dev_addr, buf, 6);
+	if (tmp < 0) {
 		dev_dbg(&dev->udev->dev,
 			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
-		if (tmp >= 0)
-			tmp = -EINVAL;
-		return tmp;
-	}
-	for (i = tmp = 0; i < 6; i++, tmp += 2)
-		dev->net->dev_addr [i] =
-			(hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
+		if (ret >= 0)
+			ret = -EINVAL;
+		return ret;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@ -326,10 +329,15 @@
 		return;
 	}
 
-	skb->protocol = eth_type_trans (skb, dev->net);
 	dev->net->stats.rx_packets++;
 	dev->net->stats.rx_bytes += skb->len;
 
+#ifdef CONFIG_AVM_PA
+	if (avm_pa_dev_pid_receive(AVM_PA_DEVINFO(dev->net), skb) == AVM_PA_RX_ACCELERATED)
+		return;
+#endif
+
+	skb->protocol = eth_type_trans(skb, dev->net);
 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
 		  skb->len + sizeof (struct ethhdr), skb->protocol);
 	memset (skb->cb, 0, sizeof (struct skb_data));
@@ -344,6 +352,31 @@
 }
 EXPORT_SYMBOL_GPL(usbnet_skb_return);
 
+/* must be called if hard_mtu or rx_urb_size changed */
+void usbnet_update_max_qlen(struct usbnet *dev)
+{
+	enum usb_device_speed speed = dev->udev->speed;
+
+	switch (speed) {
+	case USB_SPEED_HIGH:
+		dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
+		dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
+		break;
+	case USB_SPEED_SUPER:
+		/*
+		 * Not take default 5ms qlen for super speed HC to
+		 * save memory, and iperf tests show 2.5ms qlen can
+		 * work well
+		 */
+		dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
+		dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
+		break;
+	default:
+		dev->rx_qlen = dev->tx_qlen = 4;
+	}
+}
+EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
+
 
 /*-------------------------------------------------------------------------
  *
@@ -372,6 +405,9 @@
 			usbnet_unlink_rx_urbs(dev);
 	}
 
+	/* max qlen depend on hard_mtu and rx_urb_size */
+	usbnet_update_max_qlen(dev);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
@@ -403,12 +439,18 @@
 	old_state = entry->state;
 	entry->state = state;
 	__skb_unlink(skb, list);
-	spin_unlock(&list->lock);
-	spin_lock(&dev->done.lock);
+
+	/* defer_bh() is never called with list == &dev->done.
+	 * spin_lock_nested() tells lockdep that it is OK to take
+	 * dev->done.lock here with list->lock held.
+	 */
+	spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
+
 	__skb_queue_tail(&dev->done, skb);
 	if (dev->done.qlen == 1)
 		tasklet_schedule(&dev->bh);
-	spin_unlock_irqrestore(&dev->done.lock, flags);
+	spin_unlock(&dev->done.lock);
+	spin_unlock_irqrestore(&list->lock, flags);
 	return old_state;
 }
 
@@ -570,9 +612,9 @@
 			  "rx shutdown, code %d\n", urb_status);
 		goto block;
 
-	/* we get controller i/o faults during khubd disconnect() delays.
+	/* we get controller i/o faults during hub_wq disconnect() delays.
 	 * throttle down resubmits, to avoid log floods; just temporarily,
-	 * so we still recover when the fault isn't a khubd delay.
+	 * so we still recover when the fault isn't a hub_wq delay.
 	 */
 	case -EPROTO:
 	case -ETIME:
@@ -724,6 +766,20 @@
 
 /*-------------------------------------------------------------------------*/
 
+static void wait_skb_queue_empty(struct sk_buff_head *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	while (!skb_queue_empty(q)) {
+		spin_unlock_irqrestore(&q->lock, flags);
+		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		spin_lock_irqsave(&q->lock, flags);
+	}
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
 // precondition: never called in_interrupt
 static void usbnet_terminate_urbs(struct usbnet *dev)
 {
@@ -737,14 +793,11 @@
 		unlink_urbs(dev, &dev->rxq);
 
 	/* maybe wait for deletions to finish. */
-	while (!skb_queue_empty(&dev->rxq)
-		&& !skb_queue_empty(&dev->txq)
-		&& !skb_queue_empty(&dev->done)) {
-		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		netif_dbg(dev, ifdown, dev->net,
-			  "waited for %d urb completions\n", temp);
-	}
+	wait_skb_queue_empty(&dev->rxq);
+	wait_skb_queue_empty(&dev->txq);
+	wait_skb_queue_empty(&dev->done);
+	netif_dbg(dev, ifdown, dev->net,
+		  "waited for %d urb completions\n", temp);
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->wait, &wait);
 }
@@ -838,6 +891,9 @@
 		goto done;
 	}
 
+	/* hard_mtu or rx_urb_size may change in reset() */
+	usbnet_update_max_qlen(dev);
+
 	// insist peer be connected
 	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
 		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
@@ -922,6 +978,9 @@
 	if (dev->driver_info->link_reset)
 		dev->driver_info->link_reset(dev);
 
+	/* hard_mtu or rx_urb_size may change in link_reset() */
+	usbnet_update_max_qlen(dev);
+
 	return retval;
 }
 
@@ -1015,16 +1074,34 @@
 		tasklet_schedule(&dev->bh);
 	}
 
+	/* hard_mtu or rx_urb_size may change during link change */
+	usbnet_update_max_qlen(dev);
+
 	clear_bit(EVENT_LINK_CHANGE, &dev->flags);
 }
 
+static void usbnet_set_rx_mode(struct net_device *net)
+{
+	struct usbnet		*dev = netdev_priv(net);
+
+	usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
+}
+
+static void __handle_set_rx_mode(struct usbnet *dev)
+{
+	if (dev->driver_info->set_rx_mode)
+		(dev->driver_info->set_rx_mode)(dev);
+
+	clear_bit(EVENT_SET_RX_MODE, &dev->flags);
+}
+
 /* work that cannot be done in interrupt context uses keventd.
  *
  * NOTE: with 2.5 we could do more of this using completion callbacks,
  * especially now that control transfers can be queued.
  */
 static void
-kevent (struct work_struct *work)
+usbnet_deferred_kevent (struct work_struct *work)
 {
 	struct usbnet		*dev =
 		container_of(work, struct usbnet, kevent);
@@ -1123,6 +1200,10 @@
 	if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
 		__handle_link_change(dev);
 
+	if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
+		__handle_set_rx_mode(dev);
+
+
 	if (dev->flags)
 		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
 }
@@ -1136,8 +1217,7 @@
 	struct usbnet		*dev = entry->dev;
 
 	if (urb->status == 0) {
-		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
-			dev->net->stats.tx_packets++;
+		dev->net->stats.tx_packets += entry->packets;
 		dev->net->stats.tx_bytes += entry->length;
 	} else {
 		dev->net->stats.tx_errors++;
@@ -1152,8 +1232,9 @@
 		case -ESHUTDOWN:		// hardware gone
 			break;
 
-		// like rx, tx gets controller i/o faults during khubd delays
-		// and so it uses the same throttling mechanism.
+		/* like rx, tx gets controller i/o faults during hub_wq
+		 * delays and so it uses the same throttling mechanism.
+		 */
 		case -EPROTO:
 		case -ETIME:
 		case -EILSEQ:
@@ -1185,18 +1266,55 @@
 
 	unlink_urbs (dev, &dev->txq);
 	tasklet_schedule (&dev->bh);
-
-	// FIXME: device recovery -- reset?
+	/* this needs to be handled individually because the generic layer
+	 * doesn't know what is sufficient and could not restore private
+	 * information if a remedy of an unconditional reset were used.
+	 */
+	if (dev->driver_info->recover)
+		(dev->driver_info->recover)(dev);
 }
 EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
 
 /*-------------------------------------------------------------------------*/
 
+static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
+{
+	unsigned num_sgs, total_len = 0;
+	int i, s = 0;
+
+	num_sgs = skb_shinfo(skb)->nr_frags + 1;
+	if (num_sgs == 1)
+		return 0;
+
+	/* reserve one for zero packet */
+	urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
+			  GFP_ATOMIC);
+	if (!urb->sg)
+		return -ENOMEM;
+
+	urb->num_sgs = num_sgs;
+	sg_init_table(urb->sg, urb->num_sgs + 1);
+
+	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
+	total_len += skb_headlen(skb);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i];
+
+		total_len += skb_frag_size(f);
+		sg_set_page(&urb->sg[i + s], f->page.p, f->size,
+				f->page_offset);
+	}
+	urb->transfer_buffer_length = total_len;
+
+	return 1;
+}
+
 netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 				     struct net_device *net)
 {
 	struct usbnet		*dev = netdev_priv(net);
-	int			length;
+	unsigned int		length;
 	struct urb		*urb = NULL;
 	struct skb_data		*entry;
 	struct driver_info	*info = dev->driver_info;
@@ -1218,7 +1336,6 @@
 			goto drop;
 		}
 	}
-	length = skb->len;
 
 	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
 		netif_dbg(dev, tx_err, dev->net, "no urb\n");
@@ -1228,10 +1345,14 @@
 	entry = (struct skb_data *) skb->cb;
 	entry->urb = urb;
 	entry->dev = dev;
-	entry->length = length;
 
 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
 			skb->data, skb->len, tx_complete, skb);
+	if (dev->can_dma_sg) {
+		if (build_dma_sg(skb, urb) < 0)
+			goto drop;
+	}
+	length = urb->transfer_buffer_length;
 
 	/* don't assume the hardware handles USB_ZERO_PACKET
 	 * NOTE: strictly conforming cdc-ether devices should expect
@@ -1243,15 +1364,30 @@
 	if (length % dev->maxpacket == 0) {
 		if (!(info->flags & FLAG_SEND_ZLP)) {
 			if (!(info->flags & FLAG_MULTI_PACKET)) {
-				urb->transfer_buffer_length++;
-				if (skb_tailroom(skb)) {
+				length++;
+				if (skb_tailroom(skb) && !urb->num_sgs) {
					skb->data[skb->len] = 0;
 					__skb_put(skb, 1);
-				}
+				} else if (urb->num_sgs)
+					sg_set_buf(&urb->sg[urb->num_sgs++],
+							dev->padding_pkt, 1);
 			}
 		} else
 			urb->transfer_flags |= URB_ZERO_PACKET;
 	}
+	urb->transfer_buffer_length = length;
+
+	if (info->flags & FLAG_MULTI_PACKET) {
+		/* Driver has set number of packets and a length delta.
+		 * Calculate the complete length and ensure that it's
+		 * positive.
+		 */
+		entry->length += length;
+		if (WARN_ON_ONCE(entry->length <= 0))
+			entry->length = length;
+	} else {
+		usbnet_set_skb_tx_stats(skb, 1, length);
+	}
 
 	spin_lock_irqsave(&dev->txq.lock, flags);
 	retval = usb_autopm_get_interface_async(dev->intf);
@@ -1300,10 +1436,13 @@
 not_drop:
 		if (skb)
 			dev_kfree_skb_any (skb);
-		usb_free_urb (urb);
+		if (urb) {
+			kfree(urb->sg);
+			usb_free_urb(urb);
+		}
 	} else
 		netif_dbg(dev, tx_queued, dev->net,
-			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
+			  "> tx, len %u, type 0x%x\n", length, skb->protocol);
 #ifdef CONFIG_PM
 deferred:
 #endif
@@ -1351,6 +1490,7 @@
 			rx_process (dev, skb);
 			continue;
 		case tx_done:
+			kfree(entry->urb->sg);
 		case rx_cleanup:
 			usb_free_urb (entry->urb);
 			dev_kfree_skb (skb);
@@ -1432,6 +1572,7 @@
 
 	usb_kill_urb(dev->interrupt);
 	usb_free_urb(dev->interrupt);
+	kfree(dev->padding_pkt);
 
 	free_netdev(net);
 }
@@ -1442,6 +1583,7 @@
 	.ndo_stop		= usbnet_stop,
 	.ndo_start_xmit		= usbnet_start_xmit,
 	.ndo_tx_timeout		= usbnet_tx_timeout,
+	.ndo_set_rx_mode	= usbnet_set_rx_mode,
 	.ndo_change_mtu		= usbnet_change_mtu,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -1459,6 +1601,7 @@
 	.name	= "wwan",
 };
 
+
 int
 usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 {
@@ -1513,7 +1656,7 @@
 	skb_queue_head_init(&dev->rxq_pause);
 	dev->bh.func = usbnet_bh;
 	dev->bh.data = (unsigned long) dev;
-	INIT_WORK (&dev->kevent, kevent);
+	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
 	init_usb_anchor(&dev->deferred);
 	dev->delay.function = usbnet_bh;
 	dev->delay.data = (unsigned long) dev;
@@ -1530,12 +1673,6 @@
 	 * bind() should set rx_urb_size in that case.
 	 */
 	dev->hard_mtu = net->mtu + net->hard_header_len;
-#if 0
-// dma_supported() is deeply broken on almost all architectures
-	// possible with some EHCI controllers
-	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
-		net->features |= NETIF_F_HIGHDMA;
-#endif
 
 	net->netdev_ops = &usbnet_netdev_ops;
 	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
@@ -1548,6 +1685,13 @@
 		if (status < 0)
 			goto out1;
 
+/*--- 20180222 AVM/WKR - we need lte0 for LTE product ---*/
+#if defined (CONFIG_ATH79_MACH_AVM_HW214) || defined (CONFIG_ATH79_MACH_AVM_HW254)
+		strcpy(net->name, "lte%d");
+#endif
+
+/*--- 20160222 AVM/VGJ - CHANGESET: AVM-TWEAK: we always want usb0, not eth4 ---*/
+#if 0
 		// heuristic: "usb%d" for links we know are two-host,
 		// else "eth%d" when there's reasonable doubt. userspace
 		// can rename the link if it knows better.
@@ -1555,12 +1699,18 @@
 		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
 		     (net->dev_addr [0] & 0x02) == 0))
 			strcpy (net->name, "eth%d");
+#endif
+
 		/* WLAN devices should always be named "wlan%d" */
 		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
 			strcpy(net->name, "wlan%d");
+
+/*--- 20160926 AVM/WKR - CHANGESET: AVM-TWEAK: use usb0 for cdc_ncm ---*/
+#if 0
 		/* WWAN devices should always be named "wwan%d" */
 		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
 			strcpy(net->name, "wwan%d");
+#endif
 
 		/* devices that cannot do ARP */
 		if ((dev->driver_info->flags & FLAG_NOARP) != 0)
@@ -1591,14 +1741,30 @@
 		dev->rx_urb_size = dev->hard_mtu;
 	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
 
+	/* let userspace know we have a random address */
+	if (ether_addr_equal(net->dev_addr, node_id))
+		net->addr_assign_type = NET_ADDR_RANDOM;
+
 	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
 		SET_NETDEV_DEVTYPE(net, &wlan_type);
 	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
 		SET_NETDEV_DEVTYPE(net, &wwan_type);
 
+	/* initialize max rx_qlen and tx_qlen */
+	usbnet_update_max_qlen(dev);
+
+	if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
+		!(info->flags & FLAG_MULTI_PACKET)) {
+		dev->padding_pkt = kzalloc(1, GFP_KERNEL);
+		if (!dev->padding_pkt) {
+			status = -ENOMEM;
+			goto out4;
+		}
+	}
+
 	status = register_netdev (net);
 	if (status)
-		goto out4;
+		goto out5;
 	netif_info(dev, probe, dev->net,
 		   "register '%s' at usb-%s-%s, %s, %pM\n",
 		   udev->dev.driver->name,
@@ -1614,8 +1780,14 @@
 	if (dev->driver_info->flags & FLAG_LINK_INTR)
 		usbnet_link_change(dev, 0, 0);
 
+#ifdef CONFIG_AVM_PA
+	avm_pa_dev_register(dev->net);
+#endif
+
 	return 0;
 
+out5:
+	kfree(dev->padding_pkt);
 out4:
 	usb_free_urb(dev->interrupt);
 out3:
@@ -1693,6 +1865,7 @@
 			retval = usb_submit_urb(res, GFP_ATOMIC);
 			if (retval < 0) {
 				dev_kfree_skb_any(skb);
+				kfree(res->sg);
 				usb_free_urb(res);
 				usb_autopm_put_interface_async(dev->intf);
 			} else {
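
Two illustrative sketches follow; neither is part of the patch. First, the rx/tx queue depths that usbnet_update_max_qlen() derives from the MAX_QUEUE_MEMORY budget can be checked with the stand-alone program below. Only the 60 * 1518 budget and the 5x super-speed multiplier come from the patch; the 1514-byte URB size is an assumed example for plain Ethernet framing (1500-byte MTU plus 14-byte header).

/* Stand-alone (userspace) illustration of the queue-length budget above.
 * MAX_QUEUE_MEMORY mirrors the patch; the 1514-byte URB size is only an
 * assumed example, not a value read from the driver.
 */
#include <stdio.h>

#define MAX_QUEUE_MEMORY (60 * 1518)	/* same budget as in the patch */

static unsigned int qlen(unsigned int budget_mult, unsigned int urb_size)
{
	/* integer division, as in usbnet_update_max_qlen() */
	return budget_mult * MAX_QUEUE_MEMORY / urb_size;
}

int main(void)
{
	printf("high speed  : %u URBs queued\n", qlen(1, 1514));	/* 60  */
	printf("super speed : %u URBs queued\n", qlen(5, 1514));	/* 300 */
	printf("other speeds: fixed qlen of 4\n");
	return 0;
}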
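
Second, a sketch of how a minidriver could hook the two driver_info callbacks this patch calls into: set_rx_mode, invoked from the deferred kevent after ndo_set_rx_mode schedules EVENT_SET_RX_MODE, and recover, invoked from usbnet_tx_timeout instead of an unconditional reset. The callback signatures are inferred from the call sites in the patch; the "foo" minidriver and everything in its function bodies are hypothetical.

#include <linux/usb/usbnet.h>

/* Hypothetical minidriver: names and bodies are illustrative only. */
static void foo_set_rx_mode(struct usbnet *dev)
{
	/* Runs in process context from usbnet_deferred_kevent(); push the
	 * current RX filter/promiscuous state to the adapter, for example
	 * with usbnet_write_cmd(). */
}

static void foo_recover(struct usbnet *dev)
{
	/* Device-specific recovery after a TX timeout; the generic layer
	 * deliberately leaves the "how" to the minidriver. */
}

static struct driver_info foo_info = {
	.description	= "Foo USB Ethernet device",
	.flags		= FLAG_ETHER,
	.set_rx_mode	= foo_set_rx_mode,	/* hook wired up by this patch */
	.recover	= foo_recover,		/* vendor extension in this tree */
};

In a real minidriver, foo_info would be referenced from the .driver_info field of its usb_device_id table, exactly as the existing usbnet minidrivers do.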