--- zzzz-none-000/linux-3.10.107/drivers/net/ifb.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/net/ifb.c	2021-02-04 17:41:59.000000000 +0000
@@ -37,85 +37,93 @@
 #include <net/pkt_sched.h>
 #include <net/net_namespace.h>
 
+#if IS_ENABLED(CONFIG_AVM_PA)
+#include <linux/avm_pa.h>
+#endif
+
 #define TX_Q_LIMIT	32
-struct ifb_private {
+struct ifb_q_private {
+	struct net_device	*dev;
 	struct tasklet_struct	ifb_tasklet;
-	int	tasklet_pending;
-
-	struct u64_stats_sync	rsync;
+	int			tasklet_pending;
+	int			txqnum;
 	struct sk_buff_head	rq;
-	u64 rx_packets;
-	u64 rx_bytes;
+	u64			rx_packets;
+	u64			rx_bytes;
+	struct u64_stats_sync	rsync;
 
 	struct u64_stats_sync	tsync;
+	u64			tx_packets;
+	u64			tx_bytes;
 	struct sk_buff_head	tq;
-	u64 tx_packets;
-	u64 tx_bytes;
-};
+} ____cacheline_aligned_in_smp;
 
-static int numifbs = 2;
+struct ifb_dev_private {
+	struct ifb_q_private *tx_private;
+};
 
-static void ri_tasklet(unsigned long dev);
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
 static int ifb_open(struct net_device *dev);
 static int ifb_close(struct net_device *dev);
 
-static void ri_tasklet(unsigned long dev)
+static void ifb_ri_tasklet(unsigned long _txp)
 {
-	struct net_device *_dev = (struct net_device *)dev;
-	struct ifb_private *dp = netdev_priv(_dev);
+	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
 	struct netdev_queue *txq;
 	struct sk_buff *skb;
 
-	txq = netdev_get_tx_queue(_dev, 0);
-	if ((skb = skb_peek(&dp->tq)) == NULL) {
-		if (__netif_tx_trylock(txq)) {
-			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
-			__netif_tx_unlock(txq);
-		} else {
-			/* reschedule */
+	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
+	skb = skb_peek(&txp->tq);
+	if (!skb) {
+		if (!__netif_tx_trylock(txq))
 			goto resched;
-		}
+		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
+		__netif_tx_unlock(txq);
 	}
 
-	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
+	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
 		u32 from = G_TC_FROM(skb->tc_verd);
 
 		skb->tc_verd = 0;
 		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
 
-		u64_stats_update_begin(&dp->tsync);
-		dp->tx_packets++;
-		dp->tx_bytes += skb->len;
-		u64_stats_update_end(&dp->tsync);
+		u64_stats_update_begin(&txp->tsync);
+		txp->tx_packets++;
+		txp->tx_bytes += skb->len;
+		u64_stats_update_end(&txp->tsync);
 
 		rcu_read_lock();
-		skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
+		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
 		if (!skb->dev) {
 			rcu_read_unlock();
 			dev_kfree_skb(skb);
-			_dev->stats.tx_dropped++;
-			if (skb_queue_len(&dp->tq) != 0)
+			txp->dev->stats.tx_dropped++;
+			if (skb_queue_len(&txp->tq) != 0)
 				goto resched;
 			break;
 		}
 		rcu_read_unlock();
-		skb->skb_iif = _dev->ifindex;
+		skb->skb_iif = txp->dev->ifindex;
 
 		if (from & AT_EGRESS) {
 			dev_queue_xmit(skb);
 		} else if (from & AT_INGRESS) {
-			skb_pull(skb, skb->dev->hard_header_len);
-			netif_receive_skb(skb);
+			skb_pull(skb, skb->mac_len);
+#if IS_ENABLED(CONFIG_AVM_PA)
+			AVM_PKT_INFO(skb)->ingress_pid_handle = 0;
+			if (avm_pa_dev_pid_receive(AVM_PA_DEVINFO(txp->dev), skb) != AVM_PA_RX_STOLEN)
+#endif
+				netif_receive_skb(skb);
 		} else
 			BUG();
 	}
 
 	if (__netif_tx_trylock(txq)) {
-		if ((skb = skb_peek(&dp->rq)) == NULL) {
-			dp->tasklet_pending = 0;
-			if (netif_queue_stopped(_dev))
-				netif_wake_queue(_dev);
+		skb = skb_peek(&txp->rq);
+		if (!skb) {
+			txp->tasklet_pending = 0;
+			if (netif_tx_queue_stopped(txq))
+				netif_tx_wake_queue(txq);
 		} else {
 			__netif_tx_unlock(txq);
 			goto resched;
@@ -123,8 +131,8 @@
 		__netif_tx_unlock(txq);
 	} else {
 resched:
-		dp->tasklet_pending = 1;
-		tasklet_schedule(&dp->ifb_tasklet);
+		txp->tasklet_pending = 1;
+		tasklet_schedule(&txp->ifb_tasklet);
 	}
 }
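The reworked tasklet keeps the original driver's pending-flag idiom, now instantiated once per TX queue. A condensed, self-contained sketch of that idiom follows (hypothetical foo_* names, not this driver's code). Note that tasklet_schedule() is itself idempotent, so the flag only avoids redundant calls; ifb additionally serializes the flag under the TX queue lock, which this sketch leaves to the caller.

#include <linux/interrupt.h>
#include <linux/skbuff.h>

struct foo_worker {
	struct tasklet_struct	tasklet;
	int			pending;	/* serialized by the caller's lock */
	struct sk_buff_head	backlog;
};

static void foo_work(unsigned long data)
{
	struct foo_worker *w = (struct foo_worker *)data;
	struct sk_buff *skb;

	/* Drain whatever is queued right now. */
	while ((skb = skb_dequeue(&w->backlog)) != NULL)
		consume_skb(skb);		/* stand-in for real per-skb work */

	if (!skb_queue_empty(&w->backlog)) {
		/* More packets arrived while draining: re-arm ourselves. */
		w->pending = 1;
		tasklet_schedule(&w->tasklet);
	} else {
		w->pending = 0;
	}
}

static void foo_enqueue(struct foo_worker *w, struct sk_buff *skb)
{
	skb_queue_tail(&w->backlog, skb);
	if (!w->pending) {
		w->pending = 1;
		tasklet_schedule(&w->tasklet);
	}
}

static void foo_worker_init(struct foo_worker *w)
{
	skb_queue_head_init(&w->backlog);
	tasklet_init(&w->tasklet, foo_work, (unsigned long)w);
}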
@@ -132,29 +140,58 @@
 static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
 					     struct rtnl_link_stats64 *stats)
 {
-	struct ifb_private *dp = netdev_priv(dev);
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp = dp->tx_private;
 	unsigned int start;
+	u64 packets, bytes;
+	int i;
 
-	do {
-		start = u64_stats_fetch_begin_bh(&dp->rsync);
-		stats->rx_packets = dp->rx_packets;
-		stats->rx_bytes = dp->rx_bytes;
-	} while (u64_stats_fetch_retry_bh(&dp->rsync, start));
-
-	do {
-		start = u64_stats_fetch_begin_bh(&dp->tsync);
-
-		stats->tx_packets = dp->tx_packets;
-		stats->tx_bytes = dp->tx_bytes;
-
-	} while (u64_stats_fetch_retry_bh(&dp->tsync, start));
-
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+		do {
+			start = u64_stats_fetch_begin_irq(&txp->rsync);
+			packets = txp->rx_packets;
+			bytes = txp->rx_bytes;
+		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
+		stats->rx_packets += packets;
+		stats->rx_bytes += bytes;
+
+		do {
+			start = u64_stats_fetch_begin_irq(&txp->tsync);
+			packets = txp->tx_packets;
+			bytes = txp->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
+		stats->tx_packets += packets;
+		stats->tx_bytes += bytes;
+	}
 	stats->rx_dropped = dev->stats.rx_dropped;
 	stats->tx_dropped = dev->stats.tx_dropped;
 
 	return stats;
 }
 
+static int ifb_dev_init(struct net_device *dev)
+{
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp;
+	int i;
+
+	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
+	if (!txp)
+		return -ENOMEM;
+	dp->tx_private = txp;
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+		txp->txqnum = i;
+		txp->dev = dev;
+		__skb_queue_head_init(&txp->rq);
+		__skb_queue_head_init(&txp->tq);
+		u64_stats_init(&txp->rsync);
+		u64_stats_init(&txp->tsync);
+		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
+			     (unsigned long)txp);
+		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
+	}
+	return 0;
+}
 
 static const struct net_device_ops ifb_netdev_ops = {
 	.ndo_open	= ifb_open,
@@ -162,6 +199,7 @@
 	.ndo_get_stats64 = ifb_stats64,
 	.ndo_start_xmit	= ifb_xmit,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_init	= ifb_dev_init,
 };
 
 #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
@@ -169,10 +207,24 @@
 	NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
 	NETIF_F_HW_VLAN_STAG_TX)
 
+static void ifb_dev_free(struct net_device *dev)
+{
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp = dp->tx_private;
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+		tasklet_kill(&txp->ifb_tasklet);
+		__skb_queue_purge(&txp->rq);
+		__skb_queue_purge(&txp->tq);
+	}
+	kfree(dp->tx_private);
+	free_netdev(dev);
+}
+
 static void ifb_setup(struct net_device *dev)
 {
 	/* Initialize the device structure. */
-	dev->destructor = free_netdev;
 	dev->netdev_ops = &ifb_netdev_ops;
 
 	/* Fill in device structure with ethernet-generic values. */
@@ -180,23 +232,27 @@
 	dev->tx_queue_len = TX_Q_LIMIT;
 
 	dev->features |= IFB_FEATURES;
-	dev->vlan_features |= IFB_FEATURES;
+	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
+					       NETIF_F_HW_VLAN_STAG_TX);
 
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;
-	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	netif_keep_dst(dev);
 	eth_hw_addr_random(dev);
+	dev->destructor = ifb_dev_free;
 }
 
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
+	struct ifb_dev_private *dp = netdev_priv(dev);
 	u32 from = G_TC_FROM(skb->tc_verd);
+	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
 
-	u64_stats_update_begin(&dp->rsync);
-	dp->rx_packets++;
-	dp->rx_bytes += skb->len;
-	u64_stats_update_end(&dp->rsync);
+	u64_stats_update_begin(&txp->rsync);
+	txp->rx_packets++;
+	txp->rx_bytes += skb->len;
+	u64_stats_update_end(&txp->rsync);
 
 	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
 		dev_kfree_skb(skb);
@@ -204,14 +260,13 @@
 		return NETDEV_TX_OK;
 	}
 
-	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
-		netif_stop_queue(dev);
-	}
+	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
 
-	__skb_queue_tail(&dp->rq, skb);
-	if (!dp->tasklet_pending) {
-		dp->tasklet_pending = 1;
-		tasklet_schedule(&dp->ifb_tasklet);
+	__skb_queue_tail(&txp->rq, skb);
+	if (!txp->tasklet_pending) {
+		txp->tasklet_pending = 1;
+		tasklet_schedule(&txp->ifb_tasklet);
 	}
 
 	return NETDEV_TX_OK;
@@ -219,24 +274,13 @@
 
 static int ifb_close(struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
-
-	tasklet_kill(&dp->ifb_tasklet);
-	netif_stop_queue(dev);
-	__skb_queue_purge(&dp->rq);
-	__skb_queue_purge(&dp->tq);
+	netif_tx_stop_all_queues(dev);
 	return 0;
 }
 
 static int ifb_open(struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
-
-	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
-	__skb_queue_head_init(&dp->rq);
-	__skb_queue_head_init(&dp->tq);
-	netif_start_queue(dev);
-
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
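ifb_xmit() and the tasklet together implement the standard per-queue flow-control handshake: the producer stops only the queue the skb was mapped to once that queue's backlog reaches tx_queue_len, and the consumer wakes the same queue after draining it. A minimal sketch of the handshake, assuming a hypothetical driver whose private area is an array of per-queue structs (the foo_* names are illustrative, not this driver's layout):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_q {
	struct sk_buff_head	backlog;
	unsigned int		num;
};

/* Producer (ndo_start_xmit): stop only the mapped queue when full. */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_q *q = (struct foo_q *)netdev_priv(dev) +
			  skb_get_queue_mapping(skb);

	skb_queue_tail(&q->backlog, skb);
	if (skb_queue_len(&q->backlog) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, q->num));
	return NETDEV_TX_OK;
}

/* Consumer (tasklet, NAPI poll, ...): wake that queue after draining. */
static void foo_drain(struct net_device *dev, struct foo_q *q)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, q->num);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&q->backlog)) != NULL)
		consume_skb(skb);		/* stand-in for real forwarding */

	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);
}

Stopping a single struct netdev_queue instead of the whole device is what lets independent flows keep moving while one queue backs up, which is the point of the multiqueue conversion.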
@@ -253,22 +297,81 @@
 
 static struct rtnl_link_ops ifb_link_ops __read_mostly = {
 	.kind		= "ifb",
-	.priv_size	= sizeof(struct ifb_private),
+	.priv_size	= sizeof(struct ifb_dev_private),
 	.setup		= ifb_setup,
 	.validate	= ifb_validate,
 };
 
-/* Number of ifb devices to be set up by this module. */
+/* Number of ifb devices to be set up by this module.
+ * Note that these legacy devices have one queue.
+ * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
+ */
+static int numifbs = 2;
 module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
+#if IS_ENABLED(CONFIG_AVM_PA)
+static void ifb_dev_transmit(void *arg, struct sk_buff *skb)
+{
+	int rc;
+	struct net_device *dev_ifb = (struct net_device *) arg;
+	struct net_device *orig_dev;
+
+	orig_dev = __dev_get_by_index(dev_net(dev_ifb), skb->skb_iif);
+	if (unlikely(!orig_dev)) {
+		printk(KERN_ERR "ifb_dev_transmit(%s) orig_dev not found\n",
+		       dev_ifb->name);
+		kfree_skb(skb);
+		return;
+	}
+	/* Clear pkt_info so that avm_pa can create one more session after
+	 * going through ifb_xmit() */
+	skb->dev = dev_ifb;
+	/* avm_pa already performs a push for certain ingress framing types */
+	if (skb_mac_header(skb) != skb->data)
+		skb_push(skb, orig_dev->hard_header_len);
+	skb->pkt_type = PACKET_HOST;
+	rc = dev_queue_xmit(skb);
+	if (rc != 0 && net_ratelimit()) {
+		printk(KERN_ERR "ifb_dev_transmit(%s) %d\n",
+		       dev_ifb->name, rc);
+	}
+}
+
+static void register_avm_pa_pid(struct net_device *dev_ifb)
+{
+	struct avm_pa_pid_cfg cfg = {
+		.framing = avm_pa_framing_dev,
+		.default_mtu = 1500,
+		.tx_func = ifb_dev_transmit,
+		.tx_arg = dev_ifb,
+	};
+	struct avm_pa_pid_ecfg ecfg = {
+		.version = AVM_PA_PID_ECFG_VERSION,
+		.flags = AVM_PA_PID_FLAG_NO_PID_CHANGED_CHECK,
+	};
+
+	strlcpy(cfg.name, dev_ifb->name, sizeof(cfg.name));
+	if (avm_pa_dev_pid_register(AVM_PA_DEVINFO(dev_ifb), &cfg) < 0)
+		printk(KERN_ERR "%s: failed to register PA PID\n", cfg.name);
+
+	if (avm_pa_pid_set_ecfg(AVM_PA_DEVINFO(dev_ifb)->pid_handle, &ecfg)) {
+		printk(KERN_ERR "%s: failed to set extended cfg for PA PID\n",
+		       cfg.name);
+	}
+}
+#endif
+
 static int __init ifb_init_one(int index)
 {
 	struct net_device *dev_ifb;
 	int err;
 
-	dev_ifb = alloc_netdev(sizeof(struct ifb_private),
-			       "ifb%d", ifb_setup);
+	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
+			       NET_NAME_UNKNOWN, ifb_setup);
 
 	if (!dev_ifb)
 		return -ENOMEM;
@@ -278,6 +381,10 @@
 	if (err < 0)
 		goto err;
 
+#if IS_ENABLED(CONFIG_AVM_PA)
+	register_avm_pa_pid(dev_ifb);
+#endif
+
 	return 0;
 
 err:
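The comment added above recommends creating multiqueue instances from userspace (ip link add ifb10 numtxqueues 8 type ifb). If the module itself were to create multiqueue devices instead, the allocation would go through alloc_netdev_mq(). A hypothetical sketch, assuming this tree's alloc_netdev_mq() carries the same name_assign_type parameter as the patched alloc_netdev() call above:

/* Hypothetical variant of ifb_init_one(): create a device with
 * ntxq TX queues from module init instead of via "ip link". */
static int __init ifb_init_one_mq(int index, unsigned int ntxq)
{
	struct net_device *dev_ifb;
	int err;

	/* alloc_netdev_mq() sizes both the TX and RX queue arrays to ntxq. */
	dev_ifb = alloc_netdev_mq(sizeof(struct ifb_dev_private), "ifb%d",
				  NET_NAME_UNKNOWN, ifb_setup, ntxq);
	if (!dev_ifb)
		return -ENOMEM;

	dev_ifb->rtnl_link_ops = &ifb_link_ops;
	err = register_netdevice(dev_ifb);	/* caller must hold rtnl_lock() */
	if (err < 0) {
		free_netdev(dev_ifb);		/* same error path as ifb_init_one() */
		return err;
	}
	return 0;
}

No other change would be needed: ifb_dev_init() already sizes its per-queue array from dev->num_tx_queues, so each queue gets its own ifb_q_private and tasklet regardless of how the device was created.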