--- zzzz-none-000/linux-2.6.39.4/net/core/dev.c	2011-08-03 19:43:28.000000000 +0000
+++ puma6-arm-6490-729/linux-2.6.39.4/net/core/dev.c	2021-11-10 13:23:11.000000000 +0000
@@ -71,6 +71,33 @@
  *	J Hadi Salim	:	- Backlog queue sampling
  *				- netif_rx() feedback
  */
+/*----------------------------------------------------------------------------
+// Copyright 2007, Texas Instruments Incorporated
+//
+// This program has been modified from its original operation by Texas Instruments
+// to do the following:
+//
+// 1. Device Specific Protocol Handling.
+// 2. TI Meta Data Extension Console Dump for debugging.
+// 3. Device Index Reuse
+// 4. TI Layer 2 Selective Forwarder
+// 5. TI Packet Processor Enhancements
+// 6. TI Egress Hook Feature.
+//
+// THIS MODIFIED SOFTWARE AND DOCUMENTATION ARE PROVIDED
+// "AS IS," AND TEXAS INSTRUMENTS MAKES NO REPRESENTATIONS
+// OR WARRENTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+// TO, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY
+// PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE OR
+// DOCUMENTATION WILL NOT INFRINGE ANY THIRD PARTY PATENTS,
+// COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
+//
+// These changes are covered as per original license
+//-----------------------------------------------------------------------------*/
+/*
+ * Includes Intel Corporation's changes/modifications dated: [11/07/2011].
+* Changed/modified portions - Copyright © [2011], Intel Corporation.
+*/
 
 #include
 #include
@@ -119,6 +146,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -133,6 +161,10 @@
 #include
 #include
 #include
+#ifdef CONFIG_AVM_PA
+#include
+#endif
+#include
 
 #include "net-sysfs.h"
 
@@ -176,6 +208,8 @@
 static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
+static int (*avm_recvhook)(struct sk_buff *skb);
+static int (*avm_early_recvhook)(struct sk_buff *skb);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -196,6 +230,7 @@
  * unregister_netdevice(), which must be called with the rtnl
  * semaphore held.
 */
+
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
@@ -353,12 +388,57 @@
 }
 #endif
 
+#ifdef CONFIG_TI_DEVICE_PROTOCOL_HANDLING
+extern int ti_protocol_handler (struct net_device* dev, struct sk_buff *skb);
+#endif
+
+#ifdef CONFIG_TI_DEVICE_INDEX_REUSE
+extern int ti_dev_new_index(struct net *net);
+#endif /* CONFIG_TI_DEVICE_INDEX_REUSE */
+
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+extern void ti_save_netdevice_info(struct net_device *dev);
+extern void ti_free_netdevice_info(struct net_device *dev);
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
+
+
+#ifdef CONFIG_TI_EGRESS_HOOK
+extern int ti_egress_hook_handler (struct net_device* dev, struct sk_buff *skb);
+#endif /* CONFIG_TI_EGRESS_HOOK */
+
+
+#ifdef CONFIG_TI_DOCSIS_EGRESS_HOOK
+extern int ti_docsis_egress_hook_handler (struct net_device* dev, struct sk_buff *skb);
+#endif /* CONFIG_TI_DOCSIS_EGRESS_HOOK */
+
+
+#ifdef CONFIG_TI_GW_EGRESS_HOOK
+extern int ti_gw_egress_hook_handler (struct net_device* dev, struct sk_buff *skb);
+#endif /* CONFIG_TI_GW_EGRESS_HOOK */
+
+
 /*******************************************************************************
 
 		Protocol management and registration routines
 
 *******************************************************************************/
 
+void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+    spin_lock_bh(&ptype_lock);
+    avm_recvhook = recvhook;
+    spin_unlock_bh(&ptype_lock);
+}
+EXPORT_SYMBOL(set_avm_recvhook);
+
+void set_avm_early_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+    spin_lock_bh(&ptype_lock);
+    avm_early_recvhook = recvhook;
+    spin_unlock_bh(&ptype_lock);
+}
+EXPORT_SYMBOL(set_avm_early_recvhook);
+
 /*
  * Add a protocol ID to the list. Now that the input handler is
  * smarter we can dispense with all the messy stuff that used to be
@@ -1112,8 +1192,9 @@
  *	available in this kernel then it becomes a nop.
  */
 
-void dev_load(struct net *net, const char *name)
+void dev_load(struct net *net __attribute__ ((unused)), const char *name __attribute__ ((unused)))
 {
+#ifdef CONFIG_NET_DEV_LOAD
     struct net_device *dev;
     int no_module;
 
@@ -1130,6 +1211,7 @@
             "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
             "instead\n", name);
     }
+#endif /*--- #ifdef CONFIG_NET_DEV_LOAD ---*/
 }
 EXPORT_SYMBOL(dev_load);
 
@@ -1708,6 +1790,7 @@
 
 void dev_kfree_skb_irq(struct sk_buff *skb)
 {
+    avm_simple_profiling_skb(NULL, skb);
     if (atomic_dec_and_test(&skb->users)) {
         struct softnet_data *sd;
         unsigned long flags;
@@ -1865,17 +1948,24 @@
     skb->mac_len = skb->network_header - skb->mac_header;
     __skb_pull(skb, skb->mac_len);
 
+    avm_simple_profiling_skb(NULL, skb);
     if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
         struct net_device *dev = skb->dev;
-        struct ethtool_drvinfo info = {};
+        const char *name ="";
 
-        if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
-            dev->ethtool_ops->get_drvinfo(dev, &info);
+        if (dev) {
+            if (dev->dev.parent)
+                name = dev_driver_string(dev->dev.parent);
+            else
+                name = netdev_name(dev);
+        }
 
-        WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
-             info.driver, dev ? dev->features : 0L,
-             skb->sk ? skb->sk->sk_route_caps : 0L,
-             skb->len, skb->data_len, skb->ip_summed);
+        if (net_ratelimit()) {
+            WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
+                 name, dev ? dev->features : 0L,
+                 skb->sk ? skb->sk->sk_route_caps : 0L,
+                 skb->len, skb->data_len, skb->ip_summed);
+        }
 
         if (skb_header_cloned(skb) &&
             (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
@@ -2089,6 +2179,10 @@
     const struct net_device_ops *ops = dev->netdev_ops;
     int rc = NETDEV_TX_OK;
 
+#ifdef CONFIG_AVM_PA
+    (void)avm_pa_dev_snoop_transmit(AVM_PA_DEVINFO(dev), skb);
+#endif
+
     if (likely(!skb->next)) {
         u32 features;
 
@@ -2350,10 +2444,12 @@
             __qdisc_run(q);
         } else
             qdisc_run_end(q);
-
         rc = NET_XMIT_SUCCESS;
     } else {
         skb_dst_force(skb);
+#if defined(CONFIG_AVM_PA) && defined(AVM_PA_MARK_SHAPED)
+        avm_pa_mark_shaped(skb);
+#endif
         rc = q->enqueue(skb, q) & NET_XMIT_MASK;
         if (qdisc_run_begin(q)) {
             if (unlikely(contended)) {
@@ -2404,6 +2500,60 @@
     struct Qdisc *q;
     int rc = -ENOMEM;
 
+    skb_track_caller(skb);
+
+    #ifdef KERNEL_PORTING_FIX_ME
+    /* GSO will handle the following emulations directly. */
+    if (netif_needs_gso(dev, skb))
+        goto gso;
+
+    if (skb_shinfo(skb)->frag_list &&
+        !(dev->features & NETIF_F_FRAGLIST) &&
+        __skb_linearize(skb))
+        goto out_kfree_skb;
+
+    /* Fragmented skb is linearized if device does not support SG,
+     * or if at least one of fragments is in highmem and device
+     * does not support DMA from it.
+     */
+    if (skb_shinfo(skb)->nr_frags &&
+        (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
+        __skb_linearize(skb))
+        goto out_kfree_skb;
+
+    /* If packet is not checksummed and device does not support
+     * checksumming for this protocol, complete checksumming here.
+     */
+    if (skb->ip_summed == CHECKSUM_HW &&
+        (!(dev->features & NETIF_F_GEN_CSUM) &&
+         (!(dev->features & NETIF_F_IP_CSUM) ||
+          skb->protocol != htons(ETH_P_IP))))
+        if (skb_checksum_help(skb, 0))
+            goto out_kfree_skb;
+    #endif
+
+    /* Print the Message on the console indicating that the meta-data is successfully available
+     * till the core networking device layers. */
+#ifdef CONFIG_TI_META_DATA_CONSOLE_DUMP
+    if (skb->ti_meta_info != 0x0)
+        printk ("Core Networking Device Layer: %s SKB 0x%p has META Data 0x%x\n", skb->dev->name, skb, skb->ti_meta_info);
+#endif /* CONFIG_TI_META_DATA_CONSOLE_DUMP */
+
+#ifdef CONFIG_TI_GW_EGRESS_HOOK
+    if (ti_gw_egress_hook_handler(dev, skb) < 0)
+        return rc;
+#endif /* CONFIG_TI_GW_EGRESS_HOOK */
+
+#ifdef CONFIG_TI_EGRESS_HOOK
+    if (ti_egress_hook_handler(dev, skb) < 0)
+        return rc;
+#endif /* CONFIG_TI_EGRESS_HOOK */
+
+#ifdef CONFIG_TI_DOCSIS_EGRESS_HOOK
+    if (ti_docsis_egress_hook_handler(dev, skb) < 0)
+        return rc;
+#endif /* CONFIG_TI_DOCSIS_EGRESS_HOOK */
+
     /* Disable soft irqs for various locks below. Also
      * stops preemption for RCU.
      */
@@ -2470,6 +2620,9 @@
     rc = -ENETDOWN;
     rcu_read_unlock_bh();
 
+#ifdef KERNEL_PORTING_FIX_ME
+out_kfree_skb:
+#endif
     kfree_skb(skb);
     return rc;
 out:
@@ -2486,6 +2639,10 @@
 int netdev_max_backlog __read_mostly = 1000;
 int netdev_tstamp_prequeue __read_mostly = 1;
 int netdev_budget __read_mostly = 300;
+#ifdef CONFIG_AVM_NAPI_DRIVER_REGULATION
+int netdev_min_weight __read_mostly = 4;
+int netdev_max_weight __read_mostly = 128;
+#endif
 int weight_p __read_mostly = 64;            /* old backlog weight */
 
 /* Called with irq disabled */
@@ -2851,6 +3008,8 @@
 {
     int ret;
 
+    skb_track_caller(skb);
+
     /* if netpoll wants it, pretend we never saw it */
     if (netpoll_rx(skb))
         return NET_RX_DROP;
@@ -3116,6 +3275,7 @@
     if (netpoll_receive_skb(skb))
         return NET_RX_DROP;
 
+
     if (!skb->skb_iif)
         skb->skb_iif = skb->dev->ifindex;
     orig_dev = skb->dev;
@@ -3124,6 +3284,17 @@
     skb_reset_transport_header(skb);
     skb->mac_len = skb->network_header - skb->mac_header;
 
+#ifdef CONFIG_TI_DEVICE_PROTOCOL_HANDLING
+    /* Pass the packet to the device specific protocol handler */
+    if (ti_protocol_handler (skb->dev, skb) < 0)
+    {
+        /* Device Specific Protocol handler has "captured" the packet
+         * and does not want to send it up the networking stack; so
+         * return immediately. */
+        return NET_RX_SUCCESS;
+    }
+#endif /* CONFIG_TI_DEVICE_PROTOCOL_HANDLING */
+
     pt_prev = NULL;
 
     rcu_read_lock();
@@ -3154,6 +3325,33 @@
 ncls:
 #endif
 
+#ifdef CONFIG_AVM_PA
+    if (avm_pa_dev_receive(AVM_PA_DEVINFO(skb->dev), skb) == 0) {
+        ret = NET_RX_SUCCESS;
+        goto out;
+    }
+#endif
+
+    if (avm_early_recvhook && (*avm_early_recvhook)(skb)) {
+        /*
+         * packet consumed by hook
+         */
+        ret = NET_RX_SUCCESS;
+        goto out;
+    }
+
+    if (vlan_tx_tag_present(skb)) {
+        if (pt_prev) {
+            ret = deliver_skb(skb, pt_prev, orig_dev);
+            pt_prev = NULL;
+        }
+        if (vlan_hwaccel_do_receive(&skb)) {
+            ret = __netif_receive_skb(skb);
+            goto out;
+        } else if (unlikely(!skb))
+            goto out;
+    }
+
     rx_handler = rcu_dereference(skb->dev->rx_handler);
     if (rx_handler) {
         if (pt_prev) {
@@ -3174,16 +3372,12 @@
         }
     }
 
-    if (vlan_tx_tag_present(skb)) {
-        if (pt_prev) {
-            ret = deliver_skb(skb, pt_prev, orig_dev);
-            pt_prev = NULL;
-        }
-        if (vlan_hwaccel_do_receive(&skb)) {
-            ret = __netif_receive_skb(skb);
-            goto out;
-        } else if (unlikely(!skb))
-            goto out;
+    if (avm_recvhook && (*avm_recvhook)(skb)) {
+        /*
+         * packet consumed by hook
+         */
+        ret = NET_RX_SUCCESS;
+        goto out;
     }
 
     vlan_on_bond_hook(skb);
@@ -3641,6 +3835,7 @@
     }
 #endif
     napi->weight = weight_p;
+    napi->driver_weight = weight_p;
     local_irq_disable();
     while (work < quota) {
         struct sk_buff *skb;
@@ -3737,6 +3932,7 @@
     napi->skb = NULL;
     napi->poll = poll;
     napi->weight = weight;
+    napi->driver_weight = weight;
     list_add(&napi->dev_list, &dev->napi_list);
     napi->dev = dev;
 #ifdef CONFIG_NETPOLL
@@ -3765,10 +3961,41 @@
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+#define NAPI_TIME_LIMIT 2
+static inline int do_napi_poll(struct napi_struct *n, int weight)
+{
+    int work;
+    unsigned long driver_limit;
+
+    if (!IS_ENABLED(CONFIG_AVM_NAPI_DRIVER_REGULATION))
+        return n->poll(n, weight);
+
+    /* The global time limit is NAPI_TIME_LIMIT jiffies, see
+     * net_rx_action(). Consequently, a single driver must not
+     * run for any longer than that.
+     */
+    driver_limit = jiffies + NAPI_TIME_LIMIT;
+    work = n->poll(n, weight);
+    /* AVM/TMA: In some scenarios a single driver may overrun the
+     * time limit. Dynamically regulate the driver's weight to
+     * ensure the global window is not exhausted.
+     * See JZ-53626 (reproducible reboots since iq17p2 under VPN load).
+     */
+    if (time_after(jiffies, driver_limit)) {
+        n->weight = max(netdev_min_weight, weight / 2);
+    } else if (time_before(jiffies, driver_limit)) {
+        int cap = min(netdev_max_weight, n->driver_weight);
+
+        n->weight = min(cap, weight * 2);
+    }
+
+    return work;
+}
+
 static void net_rx_action(struct softirq_action *h)
 {
     struct softnet_data *sd = &__get_cpu_var(softnet_data);
-    unsigned long time_limit = jiffies + 2;
+    unsigned long time_limit = jiffies + NAPI_TIME_LIMIT;
     int budget = netdev_budget;
     void *have;
@@ -3806,7 +4033,7 @@
          */
         work = 0;
         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
-            work = n->poll(n, weight);
+            work = do_napi_poll(n, weight);
             trace_napi_poll(n);
         }
 
@@ -5079,6 +5306,14 @@
  */
 static int dev_new_index(struct net *net)
 {
+#ifdef CONFIG_TI_DEVICE_INDEX_REUSE
+    /* Original implementation does not limit the number of indexes that
+     * can be allocated. This can cause data overflow.
+     * The "index reuse feature" limits the number of devices to 32 and reuses
+     * freed up indexes.
+     */
+    return (ti_dev_new_index(net));
+#else
     static int ifindex;
     for (;;) {
         if (++ifindex <= 0)
@@ -5086,6 +5321,7 @@
         if (!__dev_get_by_index(net, ifindex))
             return ifindex;
     }
+#endif /* CONFIG_TI_DEVICE_INDEX_REUSE */
 }
 
 /* Delayed registration/unregisteration */
@@ -5404,6 +5640,20 @@
         goto err_uninit;
 
     dev->ifindex = dev_new_index(net);
+#ifdef CONFIG_TI_DEVICE_INDEX_REUSE
+    /* Original dev_new_index() implementation guarantees a unique device
+     * index by not limiting on the number of devices that can be registered.
+     * The "index reuse feature" limits the number of devices to 32. Free
+     * the allocated divert_blk
+     */
+
+    if (dev->ifindex == -1)
+    {
+        ret = -EINVAL;
+        goto err_uninit;
+    }
+#endif /* CONFIG_TI_DEVICE_INDEX_REUSE */
+
     if (dev->iflink == -1)
         dev->iflink = dev->ifindex;
 
@@ -5441,6 +5691,10 @@
 
     dev_init_scheduler(dev);
     dev_hold(dev);
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+    /* Store the netdevice pointer in global array */
+    ti_save_netdevice_info(dev);
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
     list_netdevice(dev);
 
     /* Notify protocols, that a new device appeared. */
@@ -5460,7 +5714,6 @@
 
 out:
     return ret;
-
 err_uninit:
     if (dev->netdev_ops->ndo_uninit)
         dev->netdev_ops->ndo_uninit(dev);
@@ -5672,6 +5925,9 @@
     WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
     WARN_ON(dev->dn_ptr);
 
+#ifdef CONFIG_AVM_PA
+    avm_pa_dev_unregister_sync(AVM_PA_DEVINFO(dev));
+#endif
     if (dev->destructor)
         dev->destructor(dev);
 
@@ -5821,6 +6077,30 @@
     INIT_LIST_HEAD(&dev->unreg_list);
     INIT_LIST_HEAD(&dev->link_watch_list);
     dev->priv_flags = IFF_XMIT_DST_RELEASE;
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+    /* Initialize the PID and VPID handle. By default the devices are not attached
+     * to the Packet Processor PDSP. The PID handle will be overwritten by the
+     * driver if this networking device is also a PHYSICAL device. The VPID handle
+     * is overwritten by the PROFILE if this networking device is a networking
+     * endpoint i.e. connected to the bridge or IP stack. */
+    PA_DEVINFO(dev)->pid_handle = -1;
+    PA_DEVINFO(dev)->vpid_handle = -1;
+    memset ((void *)&PA_DEVINFO(dev)->vpid_block, 0xFF, sizeof(PA_DEVINFO(dev)->vpid_block));
+    PA_DEVINFO(dev)->vpid_block.qos_clusters_count = 0;
+
+#ifdef CONFIG_MACH_PUMA6
+    PA_DEVINFO(dev)->vpid_block.flags = 0;
+#else
+    PA_DEVINFO(dev)->vpid_block.priv_vpid_flags = 0;
+#endif
+
+
+    PA_DEVINFO(dev)->qos_virtual_scheme_idx = NETDEV_PP_QOS_PROFILE_DEFAULT;
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+
+#ifdef CONFIG_AVM_PA
+    avm_pa_dev_init(AVM_PA_DEVINFO(dev));
+#endif
     setup(dev);
 
     dev->num_tx_queues = txqs;
@@ -5932,6 +6212,10 @@
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
 {
     ASSERT_RTNL();
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+    /* Clear the netdevice pointer stored in the global array */
+    ti_free_netdevice_info(dev);
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
 
     if (head) {
         list_move_tail(&dev->unreg_list, head);
@@ -6408,6 +6692,35 @@
     if (register_pernet_subsys(&netdev_net_ops))
         goto out;
 
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+    /* Initialize the HIL Core Layer. */
+    if (ti_hil_initialize() < 0)
+    {
+        printk ("Error: Unable to initialize the HIL Core\n");
+        return -1;
+    }
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+
+#ifdef CONFIG_TI_HIL_PROFILE_INTRUSIVE
+    {
+        extern TI_HIL_PROFILE hil_intrusive_profile;
+
+        /* Load the Intrusive mode HIL Profile for the system */
+        if (ti_hil_register_profile(&hil_intrusive_profile) < 0)
+            return -1;
+    }
+#endif /* CONFIG_TI_HIL_PROFILE_INTRUSIVE */
+
+#ifdef CONFIG_TI_HIL_PROFILE_STATIC
+    {
+        extern TI_HIL_PROFILE hil_static_profile;
+
+        /* Load the Static HIL Profile for the system */
+        if (ti_hil_register_profile(&hil_static_profile) < 0)
+            return -1;
+    }
+#endif /* CONFIG_TI_HIL_PROFILE_STATIC */
+
     /*
      *    Initialise the packet receive queues.
      */
@@ -6431,6 +6744,7 @@
 
         sd->backlog.poll = process_backlog;
         sd->backlog.weight = weight_p;
+        sd->backlog.driver_weight = weight_p;
         sd->backlog.gro_list = NULL;
         sd->backlog.gro_count = 0;
     }
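
The netif_receive_skb() hunks above give the two AVM hooks first refusal on ingress traffic: avm_early_recvhook runs before the VLAN/rx_handler processing, avm_recvhook runs after it, and a non-zero return value tells the stack that the hook has consumed the skb and is now responsible for it. The following is a minimal sketch of how a module built against a tree carrying this patch might register such a hook; the hook body, the ETH_P_ARP filter and the module names are illustrative assumptions, not part of the patch.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Exported by the patched net/core/dev.c above. */
extern void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb));

/* Return non-zero only when the skb has been taken over (and eventually
 * freed) here; returning 0 hands the packet back to the normal path.
 */
static int sample_recvhook(struct sk_buff *skb)
{
    if (skb->protocol != htons(ETH_P_ARP))
        return 0;               /* not ours, let the stack continue */

    /* ... inspect or redirect the frame here ... */
    kfree_skb(skb);             /* we own it now, so release it */
    return 1;
}

static int __init sample_hook_init(void)
{
    set_avm_recvhook(sample_recvhook);
    return 0;
}

static void __exit sample_hook_exit(void)
{
    set_avm_recvhook(NULL);     /* unhook before the module goes away */
}

module_init(sample_hook_init);
module_exit(sample_hook_exit);
MODULE_LICENSE("GPL");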
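The do_napi_poll() addition regulates each NAPI instance's weight at run time: when a poll overruns the NAPI_TIME_LIMIT window the weight is halved, but never below netdev_min_weight; when it finishes early the weight is doubled again, capped by the smaller of netdev_max_weight and the weight the driver originally requested (driver_weight). The user-space sketch below only replays that arithmetic with the default values from the patch so the convergence behaviour can be inspected; it is an illustration of the rule, not kernel code.

#include <stdio.h>

#define NETDEV_MIN_WEIGHT 4
#define NETDEV_MAX_WEIGHT 128

/* Mirror of the regulation rule in do_napi_poll(): halve on overrun,
 * double on a fast poll, clamped to [min, min(max, driver_weight)].
 */
static int regulate(int weight, int driver_weight, int overran)
{
    if (overran) {
        int w = weight / 2;
        return w > NETDEV_MIN_WEIGHT ? w : NETDEV_MIN_WEIGHT;
    } else {
        int cap = driver_weight < NETDEV_MAX_WEIGHT ? driver_weight : NETDEV_MAX_WEIGHT;
        int w = weight * 2;
        return w < cap ? w : cap;
    }
}

int main(void)
{
    int driver_weight = 64;     /* weight passed to netif_napi_add() */
    int weight = driver_weight;
    /* Pretend the driver overruns its slot for four polls, then recovers. */
    int overruns[] = { 1, 1, 1, 1, 0, 0, 0, 0, 0 };
    unsigned i;

    for (i = 0; i < sizeof(overruns) / sizeof(overruns[0]); i++) {
        weight = regulate(weight, driver_weight, overruns[i]);
        printf("poll %u: overrun=%d -> weight=%d\n", i, overruns[i], weight);
    }
    return 0;
}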