--- zzzz-none-000/linux-4.1.38/net/core/dev.c	2017-01-18 18:48:06.000000000 +0000
+++ bcm63-7582-715/linux-4.1.38/net/core/dev.c	2020-11-25 10:06:48.000000000 +0000
@@ -138,6 +138,12 @@
 
 #include "net-sysfs.h"
 
+#if defined(CONFIG_BCM_KF_SKB_DEFINES)
+#include 
+#include 
+#include "skb_defines.h"
+#endif
+
 /* Instead of increasing this, you should create a hash table. */
 #define MAX_GRO_SKBS 8
 
@@ -155,6 +161,11 @@
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
 
+#if 1 /* AVM */
+static int (*avm_recvhook)(struct sk_buff *skb);
+static int (*avm_early_recvhook)(struct sk_buff *skb);
+#endif
+
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -346,12 +357,100 @@
 }
 #endif
 
+#if defined(CONFIG_BCM_KF_NETDEV_PATH)
+/* Adds a NON-ROOT device to a path. A root device is indirectly
+ * added to a path once another device points to it. */
+int netdev_path_add(struct net_device *new_dev, struct net_device *next_dev)
+{
+	/* new device already in a path, fail */
+	if (netdev_path_is_linked(new_dev))
+		return -EBUSY;
+
+	netdev_path_next_dev(new_dev) = next_dev;
+
+	next_dev->path.refcount++;
+
+	return 0;
+}
+EXPORT_SYMBOL(netdev_path_add);
+
+/* Removes a device from a path */
+int netdev_path_remove(struct net_device *dev)
+{
+	/* device referenced by one or more interfaces, fail */
+	if (!netdev_path_is_leaf(dev))
+		return -EBUSY;
+
+	/* device is the first in the list: nothing to do */
+	if (netdev_path_is_root(dev))
+		return 0;
+
+	netdev_path_next_dev(dev)->path.refcount--;
+
+	netdev_path_next_dev(dev) = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(netdev_path_remove);
+
+/* Prints all devices in a path */
+void netdev_path_dump(struct net_device *dev)
+{
+	printk("netdev path : ");
+
+	while (1) {
+		printk("%s", dev->name);
+
+		if (netdev_path_is_root(dev))
+			break;
+
+		printk(" -> ");
+
+		dev = netdev_path_next_dev(dev);
+	}
+
+	printk("\n");
+}
+EXPORT_SYMBOL(netdev_path_dump);
+
+int netdev_path_set_hw_subport_mcast_idx(struct net_device *dev,
+					 unsigned int subport_idx)
+{
+	if (subport_idx >= NETDEV_PATH_HW_SUBPORTS_MAX) {
+		printk(KERN_ERR "%s : Invalid subport <%u>, max <%u>\n",
+		       __func__, subport_idx, NETDEV_PATH_HW_SUBPORTS_MAX);
+		return -1;
+	}
+
+	dev->path.hw_subport_mcast_idx = subport_idx;
+
+	return 0;
+}
+EXPORT_SYMBOL(netdev_path_set_hw_subport_mcast_idx);
+#endif /* CONFIG_BCM_KF_NETDEV_PATH */
+
+
 /*******************************************************************************
 
		Protocol management and registration routines
 
 *******************************************************************************/
 
+#if 1 /* AVM */
+void set_avm_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+	avm_recvhook = recvhook;
+}
+EXPORT_SYMBOL(set_avm_recvhook);
+
+void set_avm_early_recvhook(int (*recvhook)(struct sk_buff *skb))
+{
+	avm_early_recvhook = recvhook;
+}
+EXPORT_SYMBOL(set_avm_early_recvhook);
+#endif
+
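
A minimal usage sketch for the netdev_path API added above, assuming a hypothetical pairing of a VLAN device stacked on its root Ethernet device; names and error handling are illustrative only:

#include <linux/netdevice.h>

/* Hypothetical: link vlan_dev on top of eth_dev, show the path, unlink. */
static int example_link_path(struct net_device *vlan_dev,
			     struct net_device *eth_dev)
{
	int err;

	/* vlan_dev becomes a non-root member of the path ending at eth_dev */
	err = netdev_path_add(vlan_dev, eth_dev);
	if (err)
		return err;	/* -EBUSY: vlan_dev is already in a path */

	netdev_path_dump(vlan_dev);	/* e.g. "netdev path : vlan0 -> eth0" */

	/* only a leaf (refcount 0) may be removed; -EBUSY otherwise */
	return netdev_path_remove(vlan_dev);
}
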
 /*
  *	Add a protocol ID to the list. Now that the input handler is
  *	smarter we can dispense with all the messy stuff that used to be
@@ -971,6 +1070,33 @@
 }
 EXPORT_SYMBOL(dev_valid_name);
 
+#if defined(CONFIG_BCM_KF_FAP)
+/*
+ * Features changed due to FAP power up/down
+ */
+void dev_change_features(unsigned int features, unsigned int op)
+{
+	struct net *net;
+	struct net_device *dev;
+
+	write_lock_bh(&dev_base_lock);
+
+	for_each_net(net) {
+		for_each_netdev(net, dev) {
+			if (dev->priv_flags & (IFF_HW_SWITCH | IFF_EBRIDGE)) {
+				if (op) /* FAP power up = add features */
+					dev->features |= features;
+				else /* FAP power down = remove features */
+					dev->features &= ~features;
+			}
+		}
+	}
+
+	write_unlock_bh(&dev_base_lock);
+}
+EXPORT_SYMBOL(dev_change_features);
+#endif
+
 /**
  *	__dev_alloc_name - allocate a name for a device
  *	@net: network namespace to allocate the device name in
@@ -2298,6 +2424,180 @@
 }
 EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
+#if defined(CONFIG_BCM_KF_SKB_DEFINES)
+static struct task_struct *skb_free_task = NULL;
+static struct sk_buff *skb_completion_queue = NULL;
+static unsigned int skb_completion_queue_cnt = 0;
+static DEFINE_SPINLOCK(skbfree_lock);
+
+/* Set to the WDF budget plus some room for SKBs
+ * freed by other threads. */
+#define MAX_SKB_FREE_BUDGET 256
+
+/* the minimum number of skbs needed to wake up the free task */
+static unsigned int skb_free_start_budget __read_mostly;
+
+static int skb_free_thread_func(void *thread_data)
+{
+	unsigned int budget;
+	struct sk_buff *skb;
+	struct sk_buff *free_list = NULL;
+	unsigned long flags;
+	/* wake up periodically, every 20 ms */
+	unsigned int timeout_jiffies = msecs_to_jiffies(20);
+
+	while (!kthread_should_stop()) {
+		budget = MAX_SKB_FREE_BUDGET;
+
+update_list:
+		spin_lock_irqsave(&skbfree_lock, flags);
+		if (free_list == NULL) {
+			if (skb_completion_queue) {
+				free_list = skb_completion_queue;
+				skb_completion_queue = NULL;
+				skb_completion_queue_cnt = 0;
+			}
+		}
+		spin_unlock_irqrestore(&skbfree_lock, flags);
+
+		local_bh_disable();
+		while (free_list && budget) {
+			skb = free_list;
+			free_list = free_list->next;
+			__kfree_skb(skb);
+			budget--;
+		}
+		local_bh_enable();
+
+		if (free_list || skb_completion_queue) {
+			if (budget)
+				goto update_list;
+
+			/* we still have packets queued, reschedule the task */
+			yield();
+		} else {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(timeout_jiffies);
+		}
+	}
+	return 0;
+}
+
+#ifndef SZ_32M
+#define SZ_32M  0x02000000
+#endif
+#ifndef SZ_64M
+#define SZ_64M  0x04000000
+#endif
+#ifndef SZ_128M
+#define SZ_128M 0x08000000
+#endif
+#ifndef SZ_256M
+#define SZ_256M 0x10000000
+#endif
+
+struct task_struct *create_skb_free_task(void)
+{
+	struct task_struct *tsk;
+	struct sched_param param;
+	struct sysinfo sinfo;
+
+	si_meminfo(&sinfo);
+
+	if (sinfo.totalram <= (SZ_32M / sinfo.mem_unit))
+		skb_free_start_budget = 16;
+	else if (sinfo.totalram <= (SZ_64M / sinfo.mem_unit))
+		skb_free_start_budget = 32;
+	else if (sinfo.totalram <= (SZ_128M / sinfo.mem_unit))
+		skb_free_start_budget = 64;
+	else if (sinfo.totalram <= (SZ_256M / sinfo.mem_unit))
+		skb_free_start_budget = 128;
+	else
+		skb_free_start_budget = 256;
+
+	tsk = kthread_create(skb_free_thread_func, NULL, "skb_free_task");
+
+	if (IS_ERR(tsk)) {
+		printk("skb_free_task creation failed\n");
+		return NULL;
+	}
+
+	param.sched_priority = BCM_RTPRIO_DATA_CONTROL;
+	sched_setscheduler(tsk, SCHED_RR, &param);
+	wake_up_process(tsk);
+
+	printk("skb_free_task created successfully with start budget %u\n",
+	       skb_free_start_budget);
+	return tsk;
+}
+
+/* Queue the skb so it can be freed in thread context.
+ * Note: the free thread is not bound to any CPU; we rely on the
+ * scheduler to run it on the CPU with the least load.
+ */
+void dev_kfree_skb_thread(struct sk_buff *skb)
+{
+	unsigned long flags;
+
+	if (atomic_dec_and_test(&skb->users)) {
+		spin_lock_irqsave(&skbfree_lock, flags);
+		skb->next = skb_completion_queue;
+		skb_completion_queue = skb;
+		skb_completion_queue_cnt++;
+		spin_unlock_irqrestore(&skbfree_lock, flags);
+
+		if ((skb_free_task->state != TASK_RUNNING) &&
+		    (skb_completion_queue_cnt >= skb_free_start_budget))
+			wake_up_process(skb_free_task);
+	}
+}
+EXPORT_SYMBOL(dev_kfree_skb_thread);
+
+#include 
+gbpm_evt_hook_t gbpm_fap_evt_hook_g = (gbpm_evt_hook_t)NULL;
+EXPORT_SYMBOL(gbpm_fap_evt_hook_g);
+static int fapdrv_thread_func(void *thread_data)
+{
+	while (!kthread_should_stop()) {
+		static int scheduled = 0;
+		local_bh_disable();
+
+		if (!scheduled) {
+			printk("gbpm_do_work scheduled\n");
+			scheduled = 1;
+		}
+		if (gbpm_fap_evt_hook_g)
+			gbpm_fap_evt_hook_g();
+
+		local_bh_enable();
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+	return 0;
+}
+
+struct task_struct *fapDrvTask = NULL;
+EXPORT_SYMBOL(fapDrvTask);
+struct task_struct *create_fapDrvTask(void)
+{
+	struct sched_param param;
+	/* Create FAP driver thread for dqm work handling */
+	fapDrvTask = kthread_create(fapdrv_thread_func, NULL, "bcmFapDrv");
+
+	if (IS_ERR(fapDrvTask)) {
+		printk("fapDrvTask creation failed\n");
+		return NULL;
+	}
+
+	param.sched_priority = BCM_RTPRIO_DATA_CONTROL;
+	sched_setscheduler(fapDrvTask, SCHED_RR, &param);
+
+	wake_up_process(fapDrvTask);
+	return fapDrvTask;
+}
+
+#endif
+
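
A hedged sketch of the intended caller side of dev_kfree_skb_thread(): a driver's TX-reclaim path queues finished skbs to skb_free_task instead of freeing them inline. struct my_txq and my_fetch_completed_skb() are hypothetical stand-ins for a real driver's completion ring:

#include <linux/skbuff.h>

struct my_txq;					/* hypothetical driver type */
struct sk_buff *my_fetch_completed_skb(struct my_txq *q);

static void my_tx_reclaim(struct my_txq *q)
{
	struct sk_buff *skb;

	/* Each skb is queued under skbfree_lock; the free thread wakes
	 * once skb_free_start_budget skbs are pending, or on its 20 ms
	 * timeout, and frees them in batches of MAX_SKB_FREE_BUDGET. */
	while ((skb = my_fetch_completed_skb(q)) != NULL)
		dev_kfree_skb_thread(skb);
}
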
 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
 {
	if (in_irq() || irqs_disabled())
@@ -2341,7 +2641,11 @@
 
 static void skb_warn_bad_offload(const struct sk_buff *skb)
 {
+#if defined(CONFIG_BCM_KF_DEBUGGING_DISABLED_FIX)
+	static const netdev_features_t null_features __attribute__((unused)) = 0;
+#else
	static const netdev_features_t null_features = 0;
+#endif
	struct net_device *dev = skb->dev;
	const char *driver = "";
 
@@ -2652,7 +2956,50 @@
	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+	{
+		unsigned int devId = bcm_is_gso_loopback_dev(dev);
+
+		if (devId && bcm_gso_loopback_hw_offload) {
+			if (skb_shinfo(skb)->nr_frags || skb_is_gso(skb) ||
+			    (skb->ip_summed == CHECKSUM_PARTIAL)) {
+				skb->xmit_more = more ? 1 : 0;
+				rc = bcm_gso_loopback_hw_offload(skb, devId);
+				if (rc == NETDEV_TX_OK)
+					txq_trans_update(txq);
+			} else if (!skb->recycle_hook) {
+				/* To avoid any out-of-order packets, send all
+				 * locally generated packets through the GSO
+				 * loopback.
+				 */
+
+				/* TODO: we are classifying traffic as local
+				 * based on the recycle hook. But cloned
+				 * forwarded traffic can also have recycle_hook
+				 * as NULL, so that traffic will make an
+				 * unnecessary extra trip through the FAP. We
+				 * don't expect a lot of traffic in this case,
+				 * so this should be okay for now. Later, add a
+				 * flag to the skb and mark it as local in the
+				 * local_out hook.
+				 */
+				skb->xmit_more = more ? 1 : 0;
+				rc = bcm_gso_loopback_hw_offload(skb, devId);
+				if (rc == NETDEV_TX_OK)
+					txq_trans_update(txq);
+			} else {
+				rc = netdev_start_xmit(skb, dev, txq, more);
+			}
+		} else {
+			rc = netdev_start_xmit(skb, dev, txq, more);
+		}
+	}
+
+#else
	rc = netdev_start_xmit(skb, dev, txq, more);
+#endif
	trace_net_dev_xmit(skb, rc, dev, len);
 
	return rc;
@@ -2774,6 +3121,61 @@
	return head;
 }
 
+#if defined(CONFIG_BCM_KF_SPDSVC) && (defined(CONFIG_BCM_SPDSVC) || defined(CONFIG_BCM_SPDSVC_MODULE))
+/* Perform GSO and checksum computations on the given SKB, regardless of
+ * the advertised network interface features. */
+int skb_bypass_hw_features(struct sk_buff *skb)
+{
+	netdev_features_t features;
+	struct sk_buff *segs;
+
+	features = netif_skb_features(skb);
+
+	features &= ~(NETIF_F_SG |
+		      NETIF_F_IP_CSUM |
+		      NETIF_F_IPV6_CSUM |
+		      NETIF_F_TSO |
+		      NETIF_F_TSO6 |
+		      NETIF_F_UFO);
+
+	if (netif_needs_gso(skb, features)) {
+		segs = skb_gso_segment(skb, features);
+		if (IS_ERR(segs))
+			goto out_kfree_skb;
+		else if (segs) {
+			consume_skb(skb);
+			skb = segs;
+		}
+	} else {
+		if (skb_needs_linearize(skb, features) &&
+		    __skb_linearize(skb))
+			goto out_kfree_skb;
+
+		/* If the packet is not checksummed and the device does not
+		 * support checksumming for this protocol, complete the
+		 * checksum here.
+		 */
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			if (skb->encapsulation)
+				skb_set_inner_transport_header(skb,
+					skb_checksum_start_offset(skb));
+			else
+				skb_set_transport_header(skb,
+					skb_checksum_start_offset(skb));
+			if (skb_checksum_help(skb))
+				goto out_kfree_skb;
+		}
+	}
+
+	return 0;
+
+out_kfree_skb:
+	kfree_skb(skb);
+	return -1;
+}
+EXPORT_SYMBOL(skb_bypass_hw_features);
+#endif
+
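
How a speed-service style caller might use skb_bypass_hw_features() — a sketch assuming a non-GSO packet about to be injected on a path with no hardware offload; my_sw_xmit() is hypothetical. Note the helper frees the skb itself on failure:

#include <linux/skbuff.h>

static int my_sw_xmit(struct sk_buff *skb);	/* hypothetical transmit hook */

static int my_sw_inject(struct sk_buff *skb)
{
	/* Resolve pending checksum work in software, ignoring the offload
	 * features that skb->dev advertises. */
	if (skb_bypass_hw_features(skb) < 0)
		return -1;			/* skb already freed */

	return my_sw_xmit(skb);
}
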
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -3382,6 +3784,17 @@
 
	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/* mark the IFFWAN flag in the skb based on dev->priv_flags */
+	if (skb->dev) {
+		unsigned int mark = skb->mark;
+		skb->mark |= SKBMARK_SET_IFFWAN_MARK(mark,
+				(skb->dev->priv_flags & IFF_WANDEV) ? 1 : 0);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+		if (skb->blog_p)
+			skb->blog_p->isWan = (skb->dev->priv_flags & IFF_WANDEV) ? 1 : 0;
+#endif /* CONFIG_BCM_KF_BLOG && CONFIG_BLOG_FEATURE */
+	}
+#endif /* CONFIG_BCM_KF_WANDEV */
	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
@@ -3609,6 +4022,18 @@
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
 
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+int (*bcm_vlan_handle_frame_hook)(struct sk_buff **) = NULL;
+#endif
+#if (defined(CONFIG_BCM_MCAST) || defined(CONFIG_BCM_MCAST_MODULE)) && defined(CONFIG_BCM_KF_MCAST)
+void (*bcm_mcast_def_pri_queue_hook)(struct sk_buff *) = NULL;
+EXPORT_SYMBOL(bcm_mcast_def_pri_queue_hook);
+#endif
+#if defined(CONFIG_BCM_KF_TMS) && defined(CONFIG_BCM_TMS_MODULE)
+int (*bcm_1ag_handle_frame_check_hook)(struct sk_buff *) = NULL;
+int (*bcm_3ah_handle_frame_check_hook)(struct sk_buff *, struct net_device *) = NULL;
+#endif
+
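
These hook pointers are populated by the corresponding modules at load time. A minimal, hypothetical sketch of such a registration for the vlanctl-style hook — the struct sk_buff ** signature lets the handler replace the skb, and a non-zero return tells __netif_receive_skb_core (below) to stop processing the frame:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Hypothetical module-side receive hook. */
static int my_vlan_rx(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (skb->protocol != htons(ETH_P_8021Q) &&
	    skb->protocol != htons(ETH_P_8021AD))
		return 0;		/* not ours: continue normal receive */

	/* ... retag/translate, possibly reallocating the skb ... */
	*pskb = skb;
	return 0;			/* non-zero: consumed, stop here */
}

static int __init my_vlan_hook_init(void)
{
	bcm_vlan_handle_frame_hook = my_vlan_rx;
	return 0;
}
module_init(my_vlan_hook_init);
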
 /**
  *	netdev_rx_handler_unregister - unregister receive handler
  *	@dev: device to unregister a handler from
@@ -3657,11 +4082,26 @@
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE)) && defined(CONFIG_BCM_PTP_1588)
+	struct skb_shared_hwtstamps *timestamp;
+#endif
 
	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
	trace_netif_receive_skb(skb);
 
+#if defined(CONFIG_BCM_KF_WANDEV)
+	/* mark the IFFWAN flag in the skb based on dev->priv_flags */
+	if (skb->dev) {
+		unsigned int mark = skb->mark;
+		skb->mark |= SKBMARK_SET_IFFWAN_MARK(mark,
+				(skb->dev->priv_flags & IFF_WANDEV) ? 1 : 0);
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_FEATURE)
+		if (skb->blog_p)
+			skb->blog_p->isWan = (skb->dev->priv_flags & IFF_WANDEV) ? 1 : 0;
+#endif
+	}
+#endif
+
	orig_dev = skb->dev;
 
	skb_reset_network_header(skb);
@@ -3671,6 +4111,51 @@
 
	pt_prev = NULL;
 
+#if (defined(CONFIG_BCM_MCAST) || defined(CONFIG_BCM_MCAST_MODULE)) && defined(CONFIG_BCM_KF_MCAST)
+	if (bcm_mcast_def_pri_queue_hook != NULL)
+		bcm_mcast_def_pri_queue_hook(skb);
+#endif
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+#if defined(CONFIG_BCM_KF_TMS) && defined(CONFIG_BCM_TMS_MODULE)
+	/* Check if the 802.1ag service is started. */
+	if (bcm_1ag_handle_frame_check_hook) {
+		/* We want to skip vlanctl for 802.1ag packets. */
+		if (bcm_1ag_handle_frame_check_hook(skb))
+			goto skip_vlanctl;
+	}
+	/* Check if the 802.3ah service is started. */
+	if (bcm_3ah_handle_frame_check_hook) {
+		/* We want to skip vlanctl for 802.3ah packets, or for any
+		 * packet when 802.3ah loopback is enabled.
+		 */
+		if (bcm_3ah_handle_frame_check_hook(skb, skb->dev))
+			goto skip_vlanctl;
+	}
+#endif /* CONFIG_BCM_KF_TMS && CONFIG_BCM_TMS_MODULE */
+
+
+#if (defined(CONFIG_BCM_PTP_1588))
+	timestamp = skb_hwtstamps(skb);
+	if (timestamp->hwtstamp.tv64 == 0)
+#endif
+	if (bcm_vlan_handle_frame_hook && (ret = bcm_vlan_handle_frame_hook(&skb)) != 0)
+		goto out;
+
+#if defined(CONFIG_BCM_KF_TMS) && defined(CONFIG_BCM_TMS_MODULE)
+skip_vlanctl:
+#endif
+#endif
+
+#if IS_ENABLED(CONFIG_AVM_NET_SKB_INPUT_DEV)
+	if (!skb->input_dev)
+		skb->input_dev = skb->dev;
+#endif
+
 another_round:
	skb->skb_iif = skb->dev->ifindex;
 
@@ -3719,6 +4204,16 @@
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;
 
+#if 1 /* AVM */
+	if (avm_early_recvhook && (*avm_early_recvhook)(skb)) {
+		/*
+		 * packet consumed by hook
+		 */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+#endif
+
	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3731,6 +4226,18 @@
	}
 
	rx_handler = rcu_dereference(skb->dev->rx_handler);
+
+#if defined(CONFIG_BCM_KF_PPP)
+#if 1 /* AVM */
+#error Please dont define CONFIG_BCM_KF_PPP for Fritz!OS (see JZ-29857)
+#endif
+	if (skb->protocol == __constant_htons(ETH_P_PPP_SES) ||
+	    skb->protocol == __constant_htons(ETH_P_PPP_DISC)) {
+		if (!memcmp(skb_mac_header(skb), skb->dev->dev_addr, ETH_ALEN))
+			goto skip_rx_handler;
+	}
+#endif
+
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3758,11 +4265,29 @@
	 * and set skb->priority like in vlan_do_receive()
	 * For the time being, just ignore Priority Code Point
	 */
+#if defined(CONFIG_BCM_KF_TMS) && defined(CONFIG_BCM_TMS_MODULE)
+		if (skb->protocol != htons(ETH_P_8021AG) &&
+		    skb->protocol != htons(ETH_P_8023AH) &&
+		    ((struct vlan_hdr *)skb->data)->h_vlan_encapsulated_proto != htons(ETH_P_8021AG))
+#endif
		skb->vlan_tci = 0;
	}
 
+#if defined(CONFIG_BCM_KF_PPP)
+skip_rx_handler:
+#endif
+
	type = skb->protocol;
 
+#if 1 /* AVM */
+	if (skb && avm_recvhook && (*avm_recvhook)(skb)) {
+		/*
+		 * packet consumed by hook
+		 */
+		ret = NET_RX_SUCCESS;
+		goto out;
+	}
+#endif
+
	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
@@ -5723,7 +6248,11 @@
	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
			       IFF_AUTOMEDIA)) |
+#else
+			       IFF_AUTOMEDIA | IFF_NOMULTIPATH | IFF_MPBACKUP)) |
+#endif
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));
 
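
For the AVM hooks called in the receive path above, the consumer contract is: return 0 to let delivery continue, non-zero to take ownership of the skb (the core then reports NET_RX_SUCCESS). A hypothetical registration via set_avm_recvhook() from the top of this patch:

#include <linux/module.h>
#include <linux/skbuff.h>

#define MY_SNOOP_ETHERTYPE 0x88b5	/* hypothetical; IEEE local experimental */

static int my_recvhook(struct sk_buff *skb)
{
	if (skb->protocol != htons(MY_SNOOP_ETHERTYPE))
		return 0;		/* not consumed: normal delivery */

	consume_skb(skb);		/* real code would process the frame */
	return 1;			/* consumed */
}

static int __init my_snoop_init(void)
{
	set_avm_recvhook(my_recvhook);
	return 0;
}
module_init(my_snoop_init);
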
@@ -5979,7 +6508,31 @@
 static int dev_new_index(struct net *net)
 {
	int ifindex = net->ifindex;
+#if defined(CONFIG_BCM_KF_LIMITED_IFINDEX)
+	int loop = 0;
+#endif
+
	for (;;) {
+#if defined(CONFIG_BCM_KF_LIMITED_IFINDEX)
+#define BCM_MAX_IFINDEX 128
+
+		/* On DSL CPE, xtm interfaces are created/deleted when
+		 * the link goes up/down. On a noisy dsl link that goes
+		 * down and up frequently, the xtm ifindex may get higher
+		 * and higher if we don't reuse the lower ifindex values
+		 * that were released when the interfaces were deleted.
+		 * Therefore, we limit the index value to BCM_MAX_IFINDEX
+		 * and try to reuse ifindex values that have been released.
+		 */
+		WARN_ONCE((++loop > BCM_MAX_IFINDEX),
+			  "Cannot get new ifindex. All %d index values had "
+			  "been used.\n", BCM_MAX_IFINDEX);
+		/* try to reuse an ifindex that has been released */
+		if (ifindex >= BCM_MAX_IFINDEX)
+			ifindex = 0;
+
+#undef BCM_MAX_IFINDEX
+#endif
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
@@ -6320,6 +6873,82 @@
	return 0;
 }
 
+
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+int (*bcm_gso_loopback_hw_offload)(struct sk_buff *skb, unsigned int txDevId) = NULL;
+EXPORT_SYMBOL(bcm_gso_loopback_hw_offload);
+
+struct net_device *bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_MAXDEVS];
+
+void bcm_gso_loopback_devs_init(void)
+{
+	unsigned int i;
+	for (i = 0; i < BCM_GSO_LOOPBACK_MAXDEVS; i++) {
+		bcm_gso_loopback_devs[i] = NULL;
+	}
+}
+
+static inline void bcm_gso_loopback_add_devptr(struct net_device *dev)
+{
+	if (!strcmp("wl0", dev->name))
+		bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL0] = dev;
+	else if (!strcmp("wl1", dev->name))
+		bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL1] = dev;
+	else if (!strcmp("wl2", dev->name))
+		bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL2] = dev;
+	else /* not a known device */
+		return;
+
+	printk("+++++ Added gso loopback support for dev=%s <%p>\n",
+	       dev->name, dev);
+}
+
+static inline void bcm_gso_loopback_del_devptr(struct net_device *dev)
+{
+	if (!strcmp("wl0", dev->name))
+		bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL0] = NULL;
+	else if (!strcmp("wl1", dev->name))
+		bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL1] = NULL;
+	else if (!strcmp("wl2", dev->name))
+		bcm_gso_loopback_devs[BCM_GSO_LOOPBACK_WL2] = NULL;
+	else /* not a known device */
+		return;
+
+	printk("------ Removed gso loopback support for dev=%s <%p>\n",
+	       dev->name, dev);
+}
+
+inline unsigned int bcm_is_gso_loopback_dev(void *dev)
+{
+	int i;
+
+	for (i = 1; i < BCM_GSO_LOOPBACK_MAXDEVS; i++) {
+		if (bcm_gso_loopback_devs[i] == dev)
+			return i;
+	}
+
+	return BCM_GSO_LOOPBACK_NONE;
+}
+
+unsigned int bcm_gso_loopback_devptr2devid(void *dev)
+{
+	return bcm_is_gso_loopback_dev(dev);
+}
+EXPORT_SYMBOL(bcm_gso_loopback_devptr2devid);
+
+struct net_device *bcm_gso_loopback_devid2devptr(unsigned int devId)
+{
+	if (devId < BCM_GSO_LOOPBACK_MAXDEVS)
+		return bcm_gso_loopback_devs[devId];
+	else {
+		printk(KERN_ERR "%s: invalid devId<%u> max devs=%d\n",
+		       __func__, devId, BCM_GSO_LOOPBACK_MAXDEVS);
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(bcm_gso_loopback_devid2devptr);
+#endif
+
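
A short sketch of the pointer/id round trip provided by the table above — the FAP side deals in small integer device ids rather than pointers; the diagnostic printout is illustrative only:

#include <linux/netdevice.h>

/* Hypothetical: translate a net_device to its loopback id and back. */
static void my_show_gso_loopback_id(struct net_device *dev)
{
	unsigned int id = bcm_gso_loopback_devptr2devid(dev);

	if (id == BCM_GSO_LOOPBACK_NONE) {
		printk("%s: not a GSO loopback device\n", dev->name);
		return;
	}

	/* devid2devptr() is the inverse mapping */
	WARN_ON(bcm_gso_loopback_devid2devptr(id) != dev);
	printk("%s: GSO loopback id %u\n", dev->name, id);
}
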
 /**
  *	register_netdevice	- register a network device
  *	@dev: device to register
@@ -6439,6 +7068,10 @@
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+	bcm_gso_loopback_add_devptr(dev);
+#endif
+
	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
@@ -6788,6 +7421,9 @@
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;
+#if defined(CONFIG_BCM_KF_MISALIGN_MQS)
+	static int offset = 0;
+#endif
 
	BUG_ON(strlen(name) >= sizeof(dev->name));
 
@@ -6812,13 +7448,32 @@
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;
 
+#if defined(CONFIG_BCM_KF_MISALIGN_MQS)
+	/* KU_TBD - This causes the ethernet driver to panic on boot in
+	 * register_netdev:
+	 * BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); */
+
+	/* Add an offset to break possible alignment of dev structs in the
+	 * cache. Note that "offset" is a static variable, so it retains
+	 * its value across calls of this function. */
+	alloc_size += offset;
+#endif
+
	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(alloc_size);
	if (!p)
		return NULL;
 
+#if defined(CONFIG_BCM_KF_MISALIGN_MQS)
+	dev = PTR_ALIGN(p, NETDEV_ALIGN) + offset;
+	/* Increment offset in preparation for the next call to this
+	 * function, but don't let it grow without bound, to avoid
+	 * wasting memory. */
+	offset += NETDEV_ALIGN;
+	if (offset >= 512)
+		offset -= 512;
+#else
	dev = PTR_ALIGN(p, NETDEV_ALIGN);
+#endif
	dev->padded = (char *)dev - (char *)p;
 
	dev->pcpu_refcnt = alloc_percpu(int);
@@ -7000,6 +7655,9 @@
 void unregister_netdev(struct net_device *dev)
 {
	rtnl_lock();
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+	bcm_gso_loopback_del_devptr(dev);
+#endif
	unregister_netdevice(dev);
	rtnl_unlock();
 }
@@ -7520,9 +8178,153 @@
	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
 
+#if defined(CONFIG_BCM_KF_SKB_DEFINES)
+	skb_free_task = create_skb_free_task();
+
+	create_fapDrvTask();
+#endif
+
+#if (defined(CONFIG_BCM_KF_FAP_GSO_LOOPBACK) && defined(CONFIG_BCM_FAP_GSO_LOOPBACK))
+	bcm_gso_loopback_devs_init();
+#endif
	rc = 0;
 out:
	return rc;
 }
 
 subsys_initcall(net_dev_init);
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+struct net_device_stats *net_dev_collect_stats(struct net_device *dev_p)
+{
+	BlogStats_t bStats;
+	BlogStats_t *bStats_p;
+	struct net_device_stats *dStats_p;
+	struct net_device_stats *cStats_p;
+
+	if (dev_p == NULL || dev_p->get_stats_pointer == NULL)
+		return NULL;
+
+	dStats_p = (struct net_device_stats *)dev_p->get_stats_pointer(dev_p, 'd');
+	cStats_p = (struct net_device_stats *)dev_p->get_stats_pointer(dev_p, 'c');
+	bStats_p = (BlogStats_t *)dev_p->get_stats_pointer(dev_p, 'b');
+
+	if (dStats_p && cStats_p && bStats_p) {
+		memset(&bStats, 0, sizeof(BlogStats_t));
+
+		blog_lock();
+		blog_notify(FETCH_NETIF_STATS, (void *)dev_p,
+			    (unsigned long)&bStats, BLOG_PARAM2_NO_CLEAR);
+		blog_unlock();
+
+		memcpy(cStats_p, dStats_p, sizeof(struct net_device_stats));
+
+#if defined(CONFIG_BCM_KF_EXTSTATS)
+		/* Handle packet count statistics */
+		cStats_p->rx_packets += (bStats.rx_packets + bStats_p->rx_packets);
+		cStats_p->tx_packets += (bStats.tx_packets + bStats_p->tx_packets);
+		cStats_p->multicast += (bStats.multicast + bStats_p->multicast);
+		cStats_p->tx_multicast_packets += (bStats.tx_multicast_packets +
+						   bStats_p->tx_multicast_packets);
+		/* NOTE: There are no broadcast packets in BlogStats_t since the
+		 * flowcache doesn't accelerate broadcast. Thus, they aren't
+		 * added here. */
+
+		/* Set byte counts to 0 if the bstat packet counts are non-zero
+		 * and the octet counts are 0. */
+		/* Handle RX byte counts */
+		if (((bStats.rx_bytes + bStats_p->rx_bytes) == 0) &&
+		    ((bStats.rx_packets + bStats_p->rx_packets) > 0))
+			cStats_p->rx_bytes = 0;
+		else
+			cStats_p->rx_bytes += bStats.rx_bytes + bStats_p->rx_bytes;
+
+		/* Handle TX byte counts */
+		if (((bStats.tx_bytes + bStats_p->tx_bytes) == 0) &&
+		    ((bStats.tx_packets + bStats_p->tx_packets) > 0))
+			cStats_p->tx_bytes = 0;
+		else
+			cStats_p->tx_bytes += bStats.tx_bytes + bStats_p->tx_bytes;
+
+		/* Handle RX multicast byte counts */
+		if (((bStats.rx_multicast_bytes + bStats_p->rx_multicast_bytes) == 0) &&
+		    ((bStats.multicast + bStats_p->multicast) > 0))
+			cStats_p->rx_multicast_bytes = 0;
+		else
+			cStats_p->rx_multicast_bytes += bStats.rx_multicast_bytes +
+							bStats_p->rx_multicast_bytes;
+
+		/* Handle TX multicast byte counts */
+		if (((bStats.tx_multicast_bytes + bStats_p->tx_multicast_bytes) == 0) &&
+		    ((bStats.tx_multicast_packets + bStats_p->tx_multicast_packets) > 0))
+			cStats_p->tx_multicast_bytes = 0;
+		else
+			cStats_p->tx_multicast_bytes += bStats.tx_multicast_bytes +
+							bStats_p->tx_multicast_bytes;
+#else
+		cStats_p->rx_packets += bStats.rx_packets + bStats_p->rx_packets;
+		cStats_p->tx_packets += bStats.tx_packets + bStats_p->tx_packets;
+
+		/* Set byte counts to 0 if the bstat packet counts are non-zero
+		 * and the octet counts are 0. */
+		if (((bStats.rx_bytes + bStats_p->rx_bytes) == 0) &&
+		    ((bStats.rx_packets + bStats_p->rx_packets) > 0))
+			cStats_p->rx_bytes = 0;
+		else
+			cStats_p->rx_bytes += bStats.rx_bytes + bStats_p->rx_bytes;
+
+		if (((bStats.tx_bytes + bStats_p->tx_bytes) == 0) &&
+		    ((bStats.tx_packets + bStats_p->tx_packets) > 0))
+			cStats_p->tx_bytes = 0;
+		else
+			cStats_p->tx_bytes += bStats.tx_bytes + bStats_p->tx_bytes;
+
+		cStats_p->multicast += bStats.multicast + bStats_p->multicast;
+#endif
+
+		return cStats_p;
+	} else {
+		printk("The device should provide all three stats blocks "
+		       "('d', 'c', 'b'); see br_netdevice.c\n");
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(net_dev_collect_stats);
+
+void net_dev_clear_stats(struct net_device *dev_p)
+{
+	BlogStats_t *bStats_p;
+	struct net_device_stats *dStats_p;
+	struct net_device_stats *cStats_p;
+
+	if (dev_p == NULL || dev_p->get_stats_pointer == NULL)
+		return;
+
+	dStats_p = (struct net_device_stats *)dev_p->get_stats_pointer(dev_p, 'd');
+	cStats_p = (struct net_device_stats *)dev_p->get_stats_pointer(dev_p, 'c');
+	bStats_p = (BlogStats_t *)dev_p->get_stats_pointer(dev_p, 'b');
+
+	if (dStats_p && cStats_p && bStats_p) {
+		blog_lock();
+		blog_notify(FETCH_NETIF_STATS, (void *)dev_p, 0, BLOG_PARAM2_DO_CLEAR);
+		blog_unlock();
+
+		memset(bStats_p, 0, sizeof(BlogStats_t));
+		memset(dStats_p, 0, sizeof(struct net_device_stats));
+		memset(cStats_p, 0, sizeof(struct net_device_stats));
+	}
+}
+EXPORT_SYMBOL(net_dev_clear_stats);
+#endif
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+EXPORT_SYMBOL(bcm_vlan_handle_frame_hook);
+#endif
+
+#if defined(CONFIG_BCM_KF_TMS) && defined(CONFIG_BCM_TMS_MODULE)
+EXPORT_SYMBOL(bcm_1ag_handle_frame_check_hook);
+EXPORT_SYMBOL(bcm_3ah_handle_frame_check_hook);
+#endif
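
Finally, a sketch of the driver side these helpers expect: the (vendor-extended) net_device provides a get_stats_pointer() callback handing out the 'd'river, 'c'ombined and 'b'log statistics blocks, and the driver's ndo_get_stats can then delegate to net_dev_collect_stats(). The fallback shown here is an assumption, not part of the patch:

#include <linux/netdevice.h>

/* Hypothetical ndo_get_stats implementation delegating to the collector. */
static struct net_device_stats *my_get_stats(struct net_device *dev)
{
	struct net_device_stats *stats = net_dev_collect_stats(dev);

	/* NULL means one of the three blocks was missing; fall back to
	 * the raw driver counters. */
	return stats ? stats : &dev->stats;
}
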