--- zzzz-none-000/linux-4.4.60/net/core/dev.c	2017-04-08 07:53:53.000000000 +0000
+++ dragonfly-4020-701/linux-4.4.60/net/core/dev.c	2018-11-08 13:36:17.000000000 +0000
@@ -97,6 +97,9 @@
 #include
 #include
 #include
+#ifdef CONFIG_AVM_PA
+#include <linux/avm_pa.h>
+#endif
 #include
 #include
 #include
@@ -139,6 +142,7 @@
 #include
 #include
 #include "net-sysfs.h"
+#include "skbuff_debug.h"
 
 /* Instead of increasing this, you should create a hash table. */
 #define MAX_GRO_SKBS 8
@@ -157,6 +161,9 @@
 					 struct net_device *dev,
 					 struct netdev_notifier_info *info);
 
+static int (*avm_recvhook)(struct sk_buff *skb);
+static int (*avm_early_recvhook)(struct sk_buff *skb);
+
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -354,6 +361,18 @@
 
 *******************************************************************************/
 
+void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+	avm_recvhook = recvhook;
+}
+EXPORT_SYMBOL(set_avm_recvhook);
+
+void set_avm_early_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+	avm_early_recvhook = recvhook;
+}
+EXPORT_SYMBOL(set_avm_early_recvhook);
+
 /*
  * Add a protocol ID to the list. Now that the input handler is
  * smarter we can dispense with all the messy stuff that used to be
@@ -513,6 +532,7 @@
 out:
 	spin_unlock(&offload_lock);
 }
+EXPORT_SYMBOL(__dev_remove_offload);
 
 /**
  * dev_remove_offload - remove packet offload handler
@@ -2732,13 +2752,31 @@
 	unsigned int len;
 	int rc;
 
-	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
-		dev_queue_xmit_nit(skb, dev);
+	/* If this skb has been fast forwarded then we don't want it to
+	 * go to any taps (by definition we're trying to bypass them).
+	 */
+	if (unlikely(!skb->fast_forwarded)) {
+		if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
+			dev_queue_xmit_nit(skb, dev);
+	}
 
-	len = skb->len;
-	trace_net_dev_start_xmit(skb, dev);
-	rc = netdev_start_xmit(skb, dev, txq, more);
-	trace_net_dev_xmit(skb, rc, dev, len);
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+	if (!dev->eth_mangle_tx ||
+	    (skb = dev->eth_mangle_tx(dev, skb)) != NULL)
+#else
+	if (1)
+#endif
+	{
+		len = skb->len;
+		trace_net_dev_start_xmit(skb, dev);
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+		skb_track_funccall(skb, ops->ndo_start_xmit);
+#endif
+		rc = netdev_start_xmit(skb, dev, txq, more);
+		trace_net_dev_xmit(skb, rc, dev, len);
+	} else {
+		rc = NETDEV_TX_OK;
+	}
 
 	return rc;
 }
@@ -2749,6 +2787,10 @@
 	struct sk_buff *skb = first;
 	int rc = NETDEV_TX_OK;
 
+#ifdef CONFIG_AVM_PA
+	avm_pa_dev_snoop_transmit(AVM_PA_DEVINFO(dev), skb);
+#endif
+
 	while (skb) {
 		struct sk_buff *next = skb->next;
 
@@ -2935,6 +2977,9 @@
 
 		rc = NET_XMIT_SUCCESS;
 	} else {
+#if defined(CONFIG_AVM_PA) && defined(AVM_PA_MARK_SHAPED)
+		avm_pa_mark_shaped(skb);
+#endif
 		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
@@ -3102,6 +3147,10 @@
 	struct Qdisc *q;
 	int rc = -ENOMEM;
 
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+	skb_track_caller(skb);
+#endif
+
 	skb_reset_mac_header(skb);
 
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
@@ -3557,6 +3606,10 @@
 {
 	int ret;
 
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+	skb_track_caller(skb);
+#endif
+
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	trace_netif_rx(skb);
@@ -3813,6 +3866,9 @@
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
+int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
+
 /*
  * Limit the use of PFMEMALLOC reserves to those protocols that implement
  * the special handling of PFMEMALLOC skbs.
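
The two setters exported in the hunk at line 361 are the entire registration surface for the AVM receive hooks; the receive path patched further down treats a nonzero return from a hook as "packet consumed". Below is a minimal sketch of how a module built for this tree might attach. The module name and the extern prototype are illustrative assumptions, since the patch does not show which AVM header declares the setters.

#include <linux/module.h>
#include <linux/skbuff.h>

/* Illustrative prototype; the real declaration lives in an AVM header
 * that is not part of this patch. */
extern void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb));

/* Return nonzero to report the skb as consumed: the patched
 * __netif_receive_skb_core() then returns NET_RX_SUCCESS and stops. */
static int example_recvhook(struct sk_buff *skb)
{
	return 0;	/* 0 = not consumed, normal stack processing continues */
}

static int __init example_init(void)
{
	set_avm_recvhook(example_recvhook);
	return 0;
}

static void __exit example_exit(void)
{
	set_avm_recvhook(NULL);	/* detach before the module text goes away */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Note that the hooks are plain function pointers without RCU protection, so a real module must make sure no packets are in flight before unloading.
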
@@ -3855,6 +3911,7 @@
 	bool deliver_exact = false;
 	int ret = NET_RX_DROP;
 	__be16 type;
+	int (*fast_recv)(struct sk_buff *skb);
 
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
@@ -3869,6 +3926,11 @@
 
 	pt_prev = NULL;
 
+#if IS_ENABLED(CONFIG_AVM_NET_SKB_INPUT_DEV)
+	if (!skb->input_dev)
+		skb->input_dev = skb->dev;
+#endif
+
 another_round:
 	skb->skb_iif = skb->dev->ifindex;
 
@@ -3881,6 +3943,14 @@
 			goto out;
 	}
 
+	fast_recv = rcu_dereference(athrs_fast_nat_recv);
+	if (fast_recv) {
+		if (fast_recv(skb)) {
+			ret = NET_RX_SUCCESS;
+			goto out;
+		}
+	}
+
 #ifdef CONFIG_NET_CLS_ACT
 	if (skb->tc_verd & TC_NCLS) {
 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3921,6 +3991,24 @@
 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
 		goto drop;
 
+#ifdef CONFIG_AVM_PA
+#ifdef CONFIG_AVM_NET_DEBUG_SKBUFF_LEAK
+	skb_track_funccall(skb, avm_pa_dev_receive);
+#endif
+	if (avm_pa_dev_receive(AVM_PA_DEVINFO(skb->dev), skb) == 0) {
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+#endif
+
+	if (avm_early_recvhook && (*avm_early_recvhook)(skb)) {
+		/*
+		 * packet consumed by hook
+		 */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+
 	if (skb_vlan_tag_present(skb)) {
 		if (pt_prev) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3963,6 +4051,14 @@
 		skb->vlan_tci = 0;
 	}
 
+	if (skb && avm_recvhook && (*avm_recvhook)(skb)) {
+		/*
+		 * packet consumed by hook
+		 */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+
 	type = skb->protocol;
 
 	/* deliver only exact match when indicated */
@@ -4246,6 +4342,9 @@
 	enum gro_result ret;
 	int grow;
 
+	if (skb->gro_skip)
+		goto normal;
+
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
@@ -4388,6 +4487,7 @@
 		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
 			skb_dst_drop(skb);
 			kmem_cache_free(skbuff_head_cache, skb);
+			skbuff_debugobj_deactivate(skb);
 		} else {
 			__kfree_skb(skb);
 		}
@@ -5405,6 +5505,48 @@
 					   &upper_dev->adj_list.lower);
 }
 
+static void __netdev_addr_mask(unsigned char *mask, const unsigned char *addr,
+			       struct net_device *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->addr_len; i++)
+		mask[i] |= addr[i] ^ dev->dev_addr[i];
+}
+
+static void __netdev_upper_mask(unsigned char *mask, struct net_device *dev,
+				struct net_device *lower)
+{
+	struct net_device *cur;
+	struct list_head *iter;
+
+	netdev_for_each_upper_dev_rcu(dev, cur, iter) {
+		__netdev_addr_mask(mask, cur->dev_addr, lower);
+		__netdev_upper_mask(mask, cur, lower);
+	}
+}
+
+static void __netdev_update_addr_mask(struct net_device *dev)
+{
+	unsigned char mask[MAX_ADDR_LEN];
+	struct net_device *cur;
+	struct list_head *iter;
+
+	memset(mask, 0, sizeof(mask));
+	__netdev_upper_mask(mask, dev, dev);
+	memcpy(dev->local_addr_mask, mask, dev->addr_len);
+
+	netdev_for_each_lower_dev(dev, cur, iter)
+		__netdev_update_addr_mask(cur);
+}
+
+static void netdev_update_addr_mask(struct net_device *dev)
+{
+	rcu_read_lock();
+	__netdev_update_addr_mask(dev);
+	rcu_read_unlock();
+}
+
 static int __netdev_upper_dev_link(struct net_device *dev,
 				   struct net_device *upper_dev, bool master,
 				   void *private)
@@ -5476,6 +5618,7 @@
 		goto rollback_lower_mesh;
 	}
 
+	netdev_update_addr_mask(dev);
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 				      &changeupper_info.info);
 	return 0;
@@ -5602,6 +5745,7 @@
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
 		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 
+	netdev_update_addr_mask(dev);
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 				      &changeupper_info.info);
 }
@@ -6142,6 +6286,7 @@
 	if (err)
 		return err;
 	dev->addr_assign_type = NET_ADDR_SET;
+	netdev_update_addr_mask(dev);
 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
 	add_device_randomness(dev->dev_addr, dev->addr_len);
 	return 0;
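
__netif_receive_skb_core() above picks up athrs_fast_nat_recv via rcu_dereference() and skips the rest of the stack when the handler returns nonzero. The sketch below shows how a hypothetical fast-NAT module might install such a handler; apart from the athrs_fast_nat_recv symbol itself, every name is invented for illustration.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* Exported by the patched net/core/dev.c above. */
extern int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu;

/* Return 1 if the packet was fast-forwarded here (the core reports
 * NET_RX_SUCCESS and skips normal processing), 0 otherwise. */
static int example_fast_nat_recv(struct sk_buff *skb)
{
	return 0;
}

static int __init fast_nat_init(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, example_fast_nat_recv);
	return 0;
}

static void __exit fast_nat_exit(void)
{
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);
	synchronize_net();	/* wait out in-flight readers before unload */
}

module_init(fast_nat_init);
module_exit(fast_nat_exit);
MODULE_LICENSE("GPL");

Publishing through rcu_assign_pointer() matches the rcu_dereference() on the receive path, so the handler can be swapped without stopping traffic.
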
@@ -6992,7 +7137,9 @@
 		WARN_ON(rcu_access_pointer(dev->ip_ptr));
 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
 		WARN_ON(dev->dn_ptr);
-
+#ifdef CONFIG_AVM_PA
+		avm_pa_dev_unregister(AVM_PA_DEVINFO(dev));
+#endif
 		if (dev->destructor)
 			dev->destructor(dev);
 
@@ -7174,6 +7321,9 @@
 	INIT_LIST_HEAD(&dev->ptype_all);
 	INIT_LIST_HEAD(&dev->ptype_specific);
 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
+#ifdef CONFIG_AVM_PA
+	avm_pa_dev_init(AVM_PA_DEVINFO(dev));
+#endif
 	setup(dev);
 
 	if (!dev->tx_queue_len) {
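
For reference, the address-mask helpers added around line 5505 accumulate, byte by byte, every bit in which some stacked upper device's MAC address differs from the device's own, so dev->local_addr_mask stays all-zero as long as the whole stack shares one hardware address. The standalone user-space sketch below (sample MACs invented) mirrors the XOR-accumulate step of __netdev_addr_mask():

#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6

/* Same XOR-accumulate step as __netdev_addr_mask() in the patch. */
static void addr_mask(unsigned char *mask,
		      const unsigned char *upper_addr,
		      const unsigned char *own_addr)
{
	int i;

	for (i = 0; i < ADDR_LEN; i++)
		mask[i] |= upper_addr[i] ^ own_addr[i];
}

int main(void)
{
	unsigned char own[ADDR_LEN]   = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char upper[ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x56 };
	unsigned char mask[ADDR_LEN];
	int i;

	memset(mask, 0, sizeof(mask));
	addr_mask(mask, upper, own);

	/* Only differing bits survive: prints 00:00:00:00:00:03 here,
	 * because 0x55 ^ 0x56 == 0x03. */
	for (i = 0; i < ADDR_LEN; i++)
		printf("%02x%s", mask[i], i == ADDR_LEN - 1 ? "\n" : ":");
	return 0;
}
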