--- zzzz-none-000/linux-5.15.111/net/core/dev.c	2023-05-11 14:00:40.000000000 +0000
+++ puma7-atom-6670-761/linux-5.15.111/net/core/dev.c	2024-02-07 10:23:30.000000000 +0000
@@ -67,6 +67,12 @@
  *	J Hadi Salim	:	- Backlog queue sampling
  *				- netif_rx() feedback
  */
+/*
+ * Includes MaxLinear's changes dated: 2021, 2023.
+ * Changed portions - Copyright 2021-2023 MaxLinear, Inc.
+ * Includes Intel Corporation's changes dated: 2016, 2020.
+ * Changed portions - Copyright 2016-2020, Intel Corporation.
+ */
 
 #include 
 #include 
@@ -98,6 +104,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -119,6 +126,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -153,6 +161,10 @@
 
 #include "net-sysfs.h"
 
+#ifndef CONFIG_ARM_AVALANCHE_SOC
+#include 
+#endif
+
 #define MAX_GRO_SKBS 8
 
 /* This should be increased if a protocol with a bigger head is added. */
@@ -164,6 +176,11 @@
 struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
 
+#if IS_ENABLED(CONFIG_MCAST_HELPER)
+int (*mcast_helper_sig_check_update_ptr)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mcast_helper_sig_check_update_ptr);
+#endif
+
 static int	netif_rx_internal(struct sk_buff *skb);
 static int	call_netdevice_notifiers_info(unsigned long val,
 					      struct netdev_notifier_info *info);
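The MCAST_HELPER hunk above uses the kernel's classic exported-function-pointer hook: dev.c owns the pointer, an out-of-tree module installs its callback at load time, and the transmit path calls it only while it is non-NULL (see the __dev_queue_xmit() hunk further down). A minimal sketch of the consumer side, assuming a hypothetical module (the callback name and body are illustrative and not part of this patch):

#include <linux/module.h>
#include <linux/skbuff.h>

/* Pointer exported by net/core/dev.c in the hunk above. */
extern int (*mcast_helper_sig_check_update_ptr)(struct sk_buff *skb);

/* Hypothetical callback: inspect/refresh multicast signature state. */
static int my_mcast_sig_check(struct sk_buff *skb)
{
	/* a real helper would parse the skb and update its tables here */
	return 0;
}

static int __init my_mcast_helper_init(void)
{
	mcast_helper_sig_check_update_ptr = my_mcast_sig_check;
	return 0;
}

static void __exit my_mcast_helper_exit(void)
{
	/* unhook before the module text goes away */
	mcast_helper_sig_check_update_ptr = NULL;
}

module_init(my_mcast_helper_init);
module_exit(my_mcast_helper_exit);
MODULE_LICENSE("GPL");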
@@ -491,6 +508,25 @@
 }
 #endif
 
+#ifdef CONFIG_TI_DEVICE_PROTOCOL_HANDLING
+extern int ti_protocol_handler(struct net_device *dev, struct sk_buff *skb);
+#endif
+#ifdef CONFIG_TI_DEVICE_INDEX_REUSE
+extern int ti_dev_new_index(struct net *net);
+#endif /* CONFIG_TI_DEVICE_INDEX_REUSE */
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+extern void ti_save_netdevice_info(struct net_device *dev);
+extern void ti_free_netdevice_info(struct net_device *dev);
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
+#ifdef CONFIG_TI_EGRESS_HOOK
+extern int ti_egress_hook_handler(struct net_device *dev, struct sk_buff *skb);
+#endif /* CONFIG_TI_EGRESS_HOOK */
+#ifdef CONFIG_TI_DOCSIS_EGRESS_HOOK
+extern int ti_docsis_egress_hook_handler(struct net_device *dev, struct sk_buff *skb);
+#endif /* CONFIG_TI_DOCSIS_EGRESS_HOOK */
+#ifdef CONFIG_TI_GW_EGRESS_HOOK
+extern int ti_gw_egress_hook_handler(struct net_device *dev, struct sk_buff *skb);
+#endif /* CONFIG_TI_GW_EGRESS_HOOK */
 /*******************************************************************************
  *
  *		Protocol management and registration routines
@@ -498,6 +534,23 @@
  *
  *******************************************************************************/
 
+#ifdef CONFIG_AVM_RECV_HOOKS
+static int (*avm_recvhook)(struct sk_buff *skb);
+static int (*avm_early_recvhook)(struct sk_buff *skb);
+
+void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+	avm_recvhook = recvhook;
+}
+EXPORT_SYMBOL(set_avm_recvhook);
+
+void set_avm_early_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+	avm_early_recvhook = recvhook;
+}
+EXPORT_SYMBOL(set_avm_early_recvhook);
+#endif
+
 /*
  *	Add a protocol ID to the list.	Now that the input handler is
  *	smarter we can dispense with all the messy stuff that used to be
@@ -1179,7 +1232,11 @@
 }
 EXPORT_SYMBOL(dev_alloc_name);
 
+#ifdef CONFIG_OPENVSWITCH_BRCOMPAT
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+#else
 static int dev_get_valid_name(struct net *net, struct net_device *dev,
+#endif
 			       const char *name)
 {
 	BUG_ON(!net);
@@ -1196,6 +1253,9 @@
 
 	return 0;
 }
+#ifdef CONFIG_OPENVSWITCH_BRCOMPAT
+EXPORT_SYMBOL(dev_get_valid_name);
+#endif
 
 /**
  *	dev_change_name - change name of a device
@@ -3583,11 +3643,22 @@
 	unsigned int len;
 	int rc;
 
+	/* At this point all offload features are handled and the skb is
+	 * optimized for the driver.
+	 */
+	avm_pa_dev_snoop_transmit(AVM_PA_DEVINFO(dev), skb);
+
 	if (dev_nit_active(dev))
 		dev_queue_xmit_nit(skb, dev);
 
 	len = skb->len;
 	PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
+
+#ifdef CONFIG_AVM_NET_DEBUG_SKBUFF_LEAK
+	if (!more)
+		skb_track_funccall(skb, dev->ops->ndo_start_xmit);
+#endif
+
 	trace_net_dev_start_xmit(skb, dev);
 	rc = netdev_start_xmit(skb, dev, txq, more);
 	trace_net_dev_xmit(skb, rc, dev, len);
@@ -3868,6 +3939,7 @@
 		qdisc_run_end(q);
 		rc = NET_XMIT_SUCCESS;
 	} else {
+		avm_pa_mark_shaped(skb);
 		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
@@ -4114,6 +4186,34 @@
 	return netdev_get_tx_queue(dev, queue_index);
 }
 
+static int (*toe_dev_queue_xmit_hook)(struct sk_buff *skb) = NULL;
+
+int toe_xmit_hook_register(int (*hook_fn)(struct sk_buff *))
+{
+	if (unlikely(toe_dev_queue_xmit_hook)) {
+		pr_warn("toe_dev_queue_xmit_hook already registered!\n");
+		return -EPERM;
+	}
+
+	toe_dev_queue_xmit_hook = hook_fn;
+	pr_info("toe_dev_queue_xmit_hook function registered (=%p)\n", hook_fn);
+	return 0;
+}
+EXPORT_SYMBOL(toe_xmit_hook_register);
+
+int toe_xmit_hook_unregister(void)
+{
+	if (unlikely(!toe_dev_queue_xmit_hook)) {
+		pr_warn("toe_dev_queue_xmit_hook not registered!\n");
+		return -EPERM;
+	}
+
+	toe_dev_queue_xmit_hook = NULL;
+	pr_info("toe_dev_queue_xmit_hook function unregistered\n");
+	return 0;
+}
+EXPORT_SYMBOL(toe_xmit_hook_unregister);
+
 /**
  *	__dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
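toe_xmit_hook_register() and toe_xmit_hook_unregister() above implement a single-owner hook: a second registration fails with -EPERM rather than silently replacing the first consumer. A sketch of how a TOE driver might pair the two calls (driver and function names are hypothetical):

#include <linux/module.h>
#include <linux/skbuff.h>

/* Exported by net/core/dev.c in the hunk above. */
extern int toe_xmit_hook_register(int (*hook_fn)(struct sk_buff *));
extern int toe_xmit_hook_unregister(void);

/* Hypothetical classifier, run for every skb entering __dev_queue_xmit(). */
static int my_toe_classify(struct sk_buff *skb)
{
	/* a real driver would steer the flow toward the offload engine here */
	return 0;
}

static int __init my_toe_init(void)
{
	/* fails with -EPERM if another module already owns the hook */
	return toe_xmit_hook_register(my_toe_classify);
}

static void __exit my_toe_exit(void)
{
	toe_xmit_hook_unregister();
}

module_init(my_toe_init);
module_exit(my_toe_exit);
MODULE_LICENSE("GPL");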
@@ -4142,13 +4242,42 @@
  */
 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 {
-	struct net_device *dev = skb->dev;
+	struct net_device *dev;
 	struct netdev_queue *txq;
 	struct Qdisc *q;
 	int rc = -ENOMEM;
 	bool again = false;
 
+#ifdef CONFIG_AVM_NET_DEBUG_SKBUFF_LEAK
+	skb_track_caller(skb);
+#endif
+
 	skb_reset_mac_header(skb);
+
+	if (toe_dev_queue_xmit_hook)
+		toe_dev_queue_xmit_hook(skb);
+
+	dev = skb->dev;
+
+	/* Print a console message indicating that the meta-data successfully
+	 * made it down to the core networking device layer. */
+#ifdef CONFIG_TI_META_DATA_CONSOLE_DUMP
+	if (skb->ti_meta_info != 0x0)
+		printk("Core Networking Device Layer: %s SKB 0x%p has META Data 0x%x\n",
+		       skb->dev->name, skb, skb->ti_meta_info);
+#endif /* CONFIG_TI_META_DATA_CONSOLE_DUMP */
+#ifdef CONFIG_TI_GW_EGRESS_HOOK
+	if (ti_gw_egress_hook_handler(dev, skb) < 0)
+		return rc;
+#endif /* CONFIG_TI_GW_EGRESS_HOOK */
+#ifdef CONFIG_TI_EGRESS_HOOK
+	if (ti_egress_hook_handler(dev, skb) < 0)
+		return rc;
+#endif /* CONFIG_TI_EGRESS_HOOK */
+#ifdef CONFIG_TI_DOCSIS_EGRESS_HOOK
+	if (ti_docsis_egress_hook_handler(dev, skb) < 0)
+		return rc;
+#endif /* CONFIG_TI_DOCSIS_EGRESS_HOOK */
+
 	skb_assert_len(skb);
 
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
@@ -4159,6 +4288,11 @@
 	 */
 	rcu_read_lock_bh();
 
+#if IS_ENABLED(CONFIG_MCAST_HELPER)
+	if (mcast_helper_sig_check_update_ptr != NULL) {
+		mcast_helper_sig_check_update_ptr(skb);
+	}
+#endif
 	skb_update_prio(skb);
 
 	qdisc_pkt_len_init(skb);
@@ -4896,6 +5030,10 @@
 {
 	int ret;
 
+#ifdef CONFIG_AVM_NET_DEBUG_SKBUFF_LEAK
+	skb_track_caller(skb);
+#endif
+
 	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
 
 	trace_netif_rx(skb);
@@ -5270,6 +5408,17 @@
 	pt_prev = NULL;
 
 another_round:
+#ifdef CONFIG_TI_DEVICE_PROTOCOL_HANDLING
+	/* Pass the packet to the device-specific protocol handler */
+	if (ti_protocol_handler(skb->dev, skb) < 0)
+	{
+		/* The device-specific protocol handler has "captured" the packet
+		 * and does not want to send it up the networking stack, so
+		 * return immediately. */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+#endif /* CONFIG_TI_DEVICE_PROTOCOL_HANDLING */
 	skb->skb_iif = skb->dev->ifindex;
 
 	__this_cpu_inc(softnet_data.processed);
@@ -5332,6 +5481,25 @@
 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
 		goto drop;
 
+#ifdef CONFIG_AVM_NET_DEBUG_SKBUFF_LEAK
+	skb_track_funccall(skb, avm_pa_dev_receive);
+#endif
+
+	if (avm_pa_dev_receive(AVM_PA_DEVINFO(skb->dev), skb) == 0) {
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+
+#ifdef CONFIG_AVM_RECV_HOOKS
+	if (avm_early_recvhook && (*avm_early_recvhook)(skb)) {
+		/*
+		 * packet consumed by hook
+		 */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+#endif
+
 	if (skb_vlan_tag_present(skb)) {
 		if (pt_prev) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -5365,6 +5533,16 @@
 		}
 	}
 
+#ifdef CONFIG_AVM_RECV_HOOKS
+	if (avm_recvhook && (*avm_recvhook)(skb)) {
+		/*
+		 * packet consumed by hook
+		 */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+#endif
+
 	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
check_vlan_id:
 		if (skb_vlan_tag_get_id(skb)) {
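Both CONFIG_AVM_RECV_HOOKS call sites above treat a nonzero return as "packet consumed": __netif_receive_skb_core() then reports NET_RX_SUCCESS without freeing the skb, so a hook that claims a packet must free or requeue it itself. A minimal sketch of a hook module honoring that contract (the EtherType policy and module names are illustrative only):

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Exported by net/core/dev.c in the CONFIG_AVM_RECV_HOOKS hunk above. */
extern void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb));

static int my_recvhook(struct sk_buff *skb)
{
	/* example policy: claim the local-experimental EtherType, pass the rest */
	if (skb->protocol != htons(ETH_P_802_EX1))
		return 0;	/* not consumed: normal receive path continues */

	consume_skb(skb);	/* consumed: the hook owns (and frees) the skb */
	return 1;
}

static int __init my_hook_init(void)
{
	set_avm_recvhook(my_recvhook);
	return 0;
}

static void __exit my_hook_exit(void)
{
	set_avm_recvhook(NULL);
}

module_init(my_hook_init);
module_exit(my_hook_exit);
MODULE_LICENSE("GPL");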
@@ -9047,7 +9225,12 @@
 }
 EXPORT_SYMBOL(dev_set_mac_address);
 
+#ifdef CONFIG_OPENVSWITCH_BRCOMPAT
+DECLARE_RWSEM(dev_addr_sem);
+EXPORT_SYMBOL(dev_addr_sem);
+#else
 static DECLARE_RWSEM(dev_addr_sem);
+#endif
 
 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
 			     struct netlink_ext_ack *extack)
@@ -9791,6 +9974,14 @@
  */
 static int dev_new_index(struct net *net)
 {
+#ifdef CONFIG_TI_DEVICE_INDEX_REUSE
+	/* The original implementation does not limit the number of indexes
+	 * that can be allocated, so the index can overflow.
+	 * The "index reuse feature" limits the number of devices to 32 and
+	 * reuses freed-up indexes.
+	 */
+	return ti_dev_new_index(net);
+#else
 	int ifindex = net->ifindex;
 
 	for (;;) {
@@ -9799,6 +9990,7 @@
 		if (++ifindex <= 0)
 			ifindex = 1;
 		if (!__dev_get_by_index(net, ifindex))
 			return net->ifindex = ifindex;
 	}
+#endif /* CONFIG_TI_DEVICE_INDEX_REUSE */
 }
 
 /* Delayed registration/unregisteration */
@@ -10272,8 +10464,22 @@
 	}
 
 	ret = -EBUSY;
-	if (!dev->ifindex)
+	if (!dev->ifindex) {
 		dev->ifindex = dev_new_index(net);
+#ifdef CONFIG_TI_DEVICE_INDEX_REUSE
+		/* The original dev_new_index() implementation guarantees a
+		 * unique device index by not limiting the number of devices
+		 * that can be registered. The "index reuse feature" limits
+		 * the number of devices to 32. Free the allocated divert_blk.
+		 */
+		if (dev->ifindex == -1)
+		{
+			ret = -EINVAL;
+			goto err_uninit;
+		}
+#endif /* CONFIG_TI_DEVICE_INDEX_REUSE */
+	}
 	else if (__dev_get_by_index(net, dev->ifindex))
 		goto err_uninit;
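ti_dev_new_index() itself lives outside this file. Going by the constraints the comments above spell out (at most 32 devices, freed indexes reused, -1 on exhaustion to match the dev->ifindex == -1 check in register_netdevice()), a bitmap-based sketch might look like this:

#include <linux/bitmap.h>

struct net;	/* only passed through in this sketch */

#define TI_MAX_DEVICES	32

static DECLARE_BITMAP(ti_ifindex_map, TI_MAX_DEVICES);

/* Hand out the lowest free index in 1..TI_MAX_DEVICES, or -1 once all
 * 32 slots are taken (ifindex 0 means "not yet assigned").
 */
int ti_dev_new_index(struct net *net)
{
	unsigned long bit = find_first_zero_bit(ti_ifindex_map, TI_MAX_DEVICES);

	if (bit >= TI_MAX_DEVICES)
		return -1;
	set_bit(bit, ti_ifindex_map);
	return bit + 1;
}

/* the unregister path would undo this with
 *	clear_bit(ifindex - 1, ti_ifindex_map);
 * which is what makes the indexes reusable
 */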
@@ -10344,6 +10550,10 @@
 
 	dev_init_scheduler(dev);
 	dev_hold(dev);
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+	/* Store the netdevice pointer in the global array */
+	ti_save_netdevice_info(dev);
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
 	list_netdevice(dev);
 	add_device_randomness(dev->dev_addr, dev->addr_len);
@@ -10608,6 +10818,7 @@
 		linkwatch_forget_dev(dev);
 
 	netdev_wait_allrefs(dev);
+	avm_pa_dev_unregister_sync(AVM_PA_DEVINFO(dev));
 
 	/* paranoia */
 	BUG_ON(netdev_refcnt_read(dev) != 1);
@@ -10681,6 +10892,12 @@
 	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
 	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
 	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
+
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	if (dev->add_offload_stats)
+		dev->add_offload_stats(dev, storage);
+#endif
+
 	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
@@ -10858,6 +11075,30 @@
 	hash_init(dev->qdisc_hash);
 #endif
 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
+
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	/* Initialize the PID and VPID handles. By default the devices are not attached
+	 * to the Packet Processor PDSP. The PID handle will be overwritten by the
+	 * driver if this networking device is also a PHYSICAL device. The VPID handle
+	 * is overwritten by the PROFILE if this networking device is a networking
+	 * endpoint, i.e. connected to the bridge or IP stack. */
+	dev->pid_handle = -1;
+	dev->vpid_handle = -1;
+	memset((void *)&dev->vpid_block, 0xFF, sizeof(dev->vpid_block));
+#if !(PUMA7_OR_NEWER_SOC_TYPE)
+	dev->vpid_block.qos_clusters_count = 0;
+#endif
+#ifdef CONFIG_MACH_PUMA5
+	dev->vpid_block.priv_vpid_flags = 0;
+#else
+	dev->vpid_block.flags = 0;
+#endif
+	dev->qos_virtual_scheme_idx = NETDEV_PP_QOS_PROFILE_DEFAULT;
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+	strcpy(dev->name, name);
+
+	avm_pa_dev_init(AVM_PA_DEVINFO(dev));
+
 	setup(dev);
 
 	if (!dev->tx_queue_len) {
@@ -10875,7 +11116,6 @@
 	if (netif_alloc_rx_queues(dev))
 		goto free_all;
 
-	strcpy(dev->name, name);
 	dev->name_assign_type = name_assign_type;
 	dev->group = INIT_NETDEV_GROUP;
 	if (!dev->ethtool_ops)
@@ -10988,6 +11228,10 @@
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
 {
 	ASSERT_RTNL();
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+	/* Clear the netdevice pointer stored in the global array */
+	ti_free_netdevice_info(dev);
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
 
 	if (head) {
 		list_move_tail(&dev->unreg_list, head);
@@ -11632,6 +11876,32 @@
 	if (register_pernet_subsys(&netdev_net_ops))
 		goto out;
 
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	/* Initialize the HIL Core Layer. */
+	if (ti_hil_initialize() < 0)
+	{
+		printk("Error: Unable to initialize the HIL Core\n");
+		return -1;
+	}
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+#ifdef CONFIG_TI_HIL_PROFILE_INTRUSIVE
+	{
+		extern TI_HIL_PROFILE hil_intrusive_profile;
+
+		/* Load the Intrusive mode HIL Profile for the system */
+		if (ti_hil_register_profile(&hil_intrusive_profile) < 0)
+			return -1;
+	}
+#endif /* CONFIG_TI_HIL_PROFILE_INTRUSIVE */
+#ifdef CONFIG_TI_HIL_PROFILE_STATIC
+	{
+		extern TI_HIL_PROFILE hil_static_profile;
+
+		/* Load the Static HIL Profile for the system */
+		if (ti_hil_register_profile(&hil_static_profile) < 0)
+			return -1;
+	}
+#endif /* CONFIG_TI_HIL_PROFILE_STATIC */
 	/*
 	 *	Initialise the packet receive queues.
 	 */
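TI_HIL_PROFILE is defined in the HIL headers, not in this patch. Judging from the registration calls above, a profile is a named bundle of callbacks handed to ti_hil_register_profile(), roughly as sketched below; every field and function name here is an assumption made for illustration:

/* assumed shape of the descriptor consumed by ti_hil_register_profile();
 * the real layout ships with the TI/MaxLinear HIL headers
 */
typedef struct TI_HIL_PROFILE {
	char	*name;
	int	(*profile_init)(void);
	int	(*profile_handler)(unsigned long event_id, void *ptr);
	int	(*profile_deinit)(void);
} TI_HIL_PROFILE;

static int hil_example_init(void)
{
	return 0;	/* set up session tables, subscribe to PP events, ... */
}

static int hil_example_handler(unsigned long event_id, void *ptr)
{
	return 0;	/* react to bridge/routing/conntrack events */
}

static int hil_example_deinit(void)
{
	return 0;
}

TI_HIL_PROFILE hil_example_profile = {
	.name		 = "example",
	.profile_init	 = hil_example_init,
	.profile_handler = hil_example_handler,
	.profile_deinit	 = hil_example_deinit,
};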