--- zzzz-none-000/linux-4.9.276/net/core/dev.c	2021-07-20 14:21:16.000000000 +0000
+++ falcon-5530-750/linux-4.9.276/net/core/dev.c	2023-04-05 08:19:02.000000000 +0000
@@ -100,6 +100,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -143,6 +145,9 @@
 #include
 #include
 
+#if IS_ENABLED(CONFIG_PPA_API_SW_FASTPATH)
+#include
+#endif
 #include "net-sysfs.h"
 
 /* Instead of increasing this, you should create a hash table. */
@@ -157,6 +162,17 @@
 struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
 
+#ifdef CONFIG_NETWORK_EXTMARK
+#define match_extmark_filtertap(skb) (skb->extmark & FILTERTAP_MASK)
+#else
+#define match_extmark_filtertap(skb) 0
+#endif
+
+#if IS_ENABLED(CONFIG_MCAST_HELPER)
+int (*mcast_helper_sig_check_update_ptr)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mcast_helper_sig_check_update_ptr);
+#endif
+
 static int netif_rx_internal(struct sk_buff *skb);
 static int call_netdevice_notifiers_info(unsigned long val,
 					 struct net_device *dev,
@@ -359,6 +375,23 @@
 
 *******************************************************************************/
 
+#ifdef CONFIG_AVM_RECV_HOOKS
+static int (*avm_recvhook)(struct sk_buff *skb);
+static int (*avm_early_recvhook)(struct sk_buff *skb);
+
+void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+	avm_recvhook = recvhook;
+}
+EXPORT_SYMBOL(set_avm_recvhook);
+
+void set_avm_early_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+	avm_early_recvhook = recvhook;
+}
+EXPORT_SYMBOL(set_avm_early_recvhook);
+#endif
+
 /*
  *	Add a protocol ID to the list. Now that the input handler is
  *	smarter we can dispense with all the messy stuff that used to be
@@ -2965,13 +2998,29 @@
 	unsigned int len;
 	int rc;
 
-	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
+	/* At this point all offload features are handled and the skb is
+	 * optimized for the driver.
+	 */
+	avm_pa_dev_snoop_transmit(AVM_PA_DEVINFO(dev), skb);
+
+	if (!match_extmark_filtertap(skb) &&
+	    (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)))
 		dev_queue_xmit_nit(skb, dev);
 
-	len = skb->len;
-	trace_net_dev_start_xmit(skb, dev);
-	rc = netdev_start_xmit(skb, dev, txq, more);
-	trace_net_dev_xmit(skb, rc, dev, len);
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+	if (!dev->eth_mangle_tx ||
+	    (skb = dev->eth_mangle_tx(dev, skb)) != NULL)
+#else
+	if (1)
+#endif
+	{
+		len = skb->len;
+		trace_net_dev_start_xmit(skb, dev);
+		rc = netdev_start_xmit(skb, dev, txq, more);
+		trace_net_dev_xmit(skb, rc, dev, len);
+	} else {
+		rc = NETDEV_TX_OK;
+	}
 
 	return rc;
 }
@@ -3065,7 +3114,7 @@
 
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
 {
-	struct sk_buff *next, *head = NULL, *tail;
+	struct sk_buff *next, *head = NULL, *tail = NULL;
 
 	for (; skb != NULL; skb = next) {
 		next = skb->next;
@@ -3177,6 +3226,7 @@
 
 		rc = NET_XMIT_SUCCESS;
 	} else {
+		avm_pa_mark_shaped(skb);
 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
@@ -3268,6 +3318,7 @@
 		return NULL;
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
+	case TC_ACT_TRAP:
 		*ret = NET_XMIT_SUCCESS;
 		consume_skb(skb);
 		return NULL;
@@ -3403,11 +3454,26 @@
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
 		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
 
+	avm_simple_profiling_skb(0, skb);
+
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
 	 */
 	rcu_read_lock_bh();
 
+#if IS_ENABLED(CONFIG_MCAST_HELPER)
+	if (mcast_helper_sig_check_update_ptr != NULL) {
+		/*ret = mcast_helper_sig_check_update_ptr(skb);*/
+		mcast_helper_sig_check_update_ptr(skb);
+		/*if (ret == 1)
+		{
+			//dev_kfree_skb_any(skb);
+			//rcu_read_unlock_bh();
+			//return rc;
+		}*/
+	}
+#endif
+
 	skb_update_prio(skb);
 
 	qdisc_pkt_len_init(skb);
@@ -4020,6 +4086,7 @@
 		return NULL;
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
+	case TC_ACT_TRAP:
 		consume_skb(skb);
 		return NULL;
 	case TC_ACT_REDIRECT:
@@ -4219,6 +4286,25 @@
 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
 		goto drop;
 
+#ifdef CONFIG_AVM_NET_DEBUG_SKBUFF_LEAK
+	skb_track_funccall(skb, avm_pa_dev_receive);
+#endif
+
+	if (avm_pa_dev_receive(AVM_PA_DEVINFO(skb->dev), skb) == 0) {
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+
+#ifdef CONFIG_AVM_RECV_HOOKS
+	if (avm_early_recvhook && (*avm_early_recvhook)(skb)) {
+		/*
+		 * packet consumed by hook
+		 */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+#endif
+
 	if (skb_vlan_tag_present(skb)) {
 		if (pt_prev) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -4261,6 +4347,16 @@
 		skb->vlan_tci = 0;
 	}
 
+#ifdef CONFIG_AVM_RECV_HOOKS
+	if (avm_recvhook && (*avm_recvhook)(skb)) {
+		/*
+		 * packet consumed by hook
+		 */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+#endif
+
 	type = skb->protocol;
 
 	/* deliver only exact match when indicated */
@@ -4568,6 +4664,9 @@
 	enum gro_result ret;
 	int grow;
 
+	if (skb->gro_skip)
+		goto normal;
+
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
@@ -4945,7 +5044,12 @@
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
 			rcu_read_lock();
-			__netif_receive_skb(skb);
+#if IS_ENABLED(CONFIG_PPA_API_SW_FASTPATH)
+			/* Try for PPA software acceleration, if possible */
+			if (!ppa_hook_sw_fastpath_send_fn ||
+			    ppa_hook_sw_fastpath_send_fn(skb))
+#endif
+				__netif_receive_skb(skb);
 			rcu_read_unlock();
 			input_queue_head_incr(sd);
 			if (++work >= quota)
@@ -5871,6 +5975,48 @@
 					   &upper_dev->adj_list.lower);
 }
 
+static void __netdev_addr_mask(unsigned char *mask, const unsigned char *addr,
+			       struct net_device *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->addr_len; i++)
+		mask[i] |= addr[i] ^ dev->dev_addr[i];
+}
+
+static void __netdev_upper_mask(unsigned char *mask, struct net_device *dev,
+				struct net_device *lower)
+{
+	struct net_device *cur;
+	struct list_head *iter;
+
+	netdev_for_each_upper_dev_rcu(dev, cur, iter) {
+		__netdev_addr_mask(mask, cur->dev_addr, lower);
+		__netdev_upper_mask(mask, cur, lower);
+	}
+}
+
+static void __netdev_update_addr_mask(struct net_device *dev)
+{
+	unsigned char mask[MAX_ADDR_LEN];
+	struct net_device *cur;
+	struct list_head *iter;
+
+	memset(mask, 0, sizeof(mask));
+	__netdev_upper_mask(mask, dev, dev);
+	memcpy(dev->local_addr_mask, mask, dev->addr_len);
+
+	netdev_for_each_lower_dev(dev, cur, iter)
+		__netdev_update_addr_mask(cur);
+}
+
+static void netdev_update_addr_mask(struct net_device *dev)
+{
+	rcu_read_lock();
+	__netdev_update_addr_mask(dev);
+	rcu_read_unlock();
+}
+
 static int __netdev_upper_dev_link(struct net_device *dev,
 				   struct net_device *upper_dev, bool master,
 				   void *upper_priv, void *upper_info)
@@ -5943,6 +6089,7 @@
 		goto rollback_lower_mesh;
 	}
 
+	netdev_update_addr_mask(dev);
 	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 					    &changeupper_info.info);
 	ret = notifier_to_errno(ret);
@@ -6069,6 +6216,7 @@
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
 		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 
+	netdev_update_addr_mask(dev);
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 				      &changeupper_info.info);
 }
@@ -6672,6 +6820,7 @@
 	if (err)
 		return err;
 	dev->addr_assign_type = NET_ADDR_SET;
+	netdev_update_addr_mask(dev);
 	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
 	add_device_randomness(dev->dev_addr, dev->addr_len);
 	return 0;
@@ -7583,6 +7732,7 @@
 		dev->reg_state = NETREG_UNREGISTERED;
 
 		netdev_wait_allrefs(dev);
+		avm_pa_dev_unregister_sync(AVM_PA_DEVINFO(dev));
 
 		/* paranoia */
 		BUG_ON(netdev_refcnt_read(dev));
@@ -7783,6 +7933,7 @@
 	hash_init(dev->qdisc_hash);
 #endif
 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
+	avm_pa_dev_init(AVM_PA_DEVINFO(dev));
 	setup(dev);
 
 	if (!dev->tx_queue_len) {
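
Usage note (not part of the patch): the CONFIG_AVM_RECV_HOOKS hunks above export set_avm_recvhook() and set_avm_early_recvhook() so that an external module can tap packets inside __netif_receive_skb_core(). Below is a minimal sketch of such a module, assuming the hook prototypes added above; the module name, the extern declaration (a real build would get it from an AVM header) and the filter condition are hypothetical. What the diff itself establishes is the contract: a hook returns nonzero only when it has consumed the skb, in which case the core reports NET_RX_SUCCESS and stops processing, so the hook must dispose of the buffer.

/* Hypothetical example module, for illustration only. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Assumed to match the setter exported from net/core/dev.c above;
 * in a real build this would come from an AVM header instead.
 */
extern void set_avm_early_recvhook(int (*recvhook)(struct sk_buff *skb));

static int example_early_recvhook(struct sk_buff *skb)
{
	/* Example condition: claim frames of a private ethertype
	 * (IEEE 802 local experimental). Returning 0 lets every
	 * other packet continue down the normal receive path.
	 */
	if (skb->protocol != htons(ETH_P_802_EX1))
		return 0;

	/* Consume the frame: after a nonzero return, dev.c does
	 * "ret = NET_RX_SUCCESS; goto out;" and never touches the
	 * skb again, so the hook owns it and must free it.
	 */
	kfree_skb(skb);
	return 1;
}

static int __init example_hook_init(void)
{
	set_avm_early_recvhook(example_early_recvhook);
	return 0;
}

static void __exit example_hook_exit(void)
{
	/* Unregister before unload; dev.c checks the pointer for NULL. */
	set_avm_early_recvhook(NULL);
}

module_init(example_hook_init);
module_exit(example_hook_exit);
MODULE_LICENSE("GPL");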