--- zzzz-none-000/linux-3.10.107/net/core/skbuff.c 2017-06-27 09:49:32.000000000 +0000
+++ vr9-7490-729/linux-3.10.107/net/core/skbuff.c 2021-11-10 11:53:56.000000000 +0000
@@ -36,6 +36,12 @@
  *	The functions in this file will not compile correctly with gcc 2.4.x
  */
 
+/**
+ * Some part of this file is modified by Ikanos Communications.
+ *
+ * Copyright (C) 2013-2014 Ikanos Communications.
+ */
+
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include
@@ -73,6 +79,21 @@
 #include
 #include
 
+#ifdef CONFIG_AVM_PA
+#include
+#endif
+
+// AVM/TKL: MERGE fixed relative include paths
+#if defined(CONFIG_FUSIV_ENABLE_MBUF_AP) || defined(CONFIG_FUSIV_ENABLE_AP_MBUF)
+#include
+#include
+#include
+#include
+unsigned char* (*ap_get_cluster_ptr)(struct sk_buff *skbuff, int size) = NULL;
+void (*putCluster_ptr)(void *ulPtr) = NULL;
+unsigned char* (*ap_get_cluster_without_skbrst_ptr)(struct sk_buff *skbuff, int size) = NULL;
+static struct sk_buff *ap_get_skb_cluster(unsigned int length, gfp_t gfp_mask);
+#endif
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -156,7 +177,7 @@
     struct sk_buff *skb;
 
     /* Get the HEAD */
-    skb = kmem_cache_alloc_node(skbuff_head_cache,
+    skb = kmem_cache_alloc_node(avm_skb_get_cachep(),
                     gfp_mask & ~__GFP_DMA, node);
     if (!skb)
         goto out;
@@ -205,7 +226,7 @@
     bool pfmemalloc;
 
     cache = (flags & SKB_ALLOC_FCLONE)
-        ? skbuff_fclone_cache : skbuff_head_cache;
+        ? skbuff_fclone_cache : avm_skb_get_cachep();
 
     if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
         gfp_mask |= __GFP_MEMALLOC;
@@ -222,6 +243,17 @@
      * Both skb->head and skb_shared_info are cache line aligned.
      */
     size = SKB_DATA_ALIGN(size);
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    if( ap_get_cluster_ptr != NULL ) {
+        data = (u8*)(*ap_get_cluster_ptr)(skb, size +
+            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+            SKB_DATA_ALIGN(PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN) - (PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN));
+        if(data)
+            pfmemalloc = false;
+    }
+    if(data == NULL)
+#endif
+    {
     size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
     data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
     if (!data)
         goto nodata;
@@ -239,7 +271,11 @@
      * the tail pointer in struct sk_buff!
      */
     memset(skb, 0, offsetof(struct sk_buff, tail));
+    }
     /* Account for allocated memory : skb + skb->head */
+#ifdef CONFIG_NET_DEBUG_SKBUFF_LEAK
+    skb_track_caller(skb);
+#endif
     skb->truesize = SKB_TRUESIZE(size);
     skb->pfmemalloc = pfmemalloc;
     atomic_set(&skb->users, 1);
@@ -247,6 +283,12 @@
     skb->data = data;
     skb_reset_tail_pointer(skb);
     skb->end = skb->tail + size;
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    if(skb->apAllocAddr) {
+        skb->end += SKB_DATA_ALIGN(PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN) - (PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN);
+    }
+#endif
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
     skb->mac_header = ~0U;
     skb->transport_header = ~0U;
@@ -302,7 +344,7 @@
     struct sk_buff *skb;
     unsigned int size = frag_size ? : ksize(data);
 
-    skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+    skb = kmem_cache_alloc(avm_skb_get_cachep(), GFP_ATOMIC);
     if (!skb)
         return NULL;
@@ -418,6 +460,10 @@
     unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    skb = ap_get_skb_cluster(length + NET_SKB_PAD, gfp_mask);
+    if(!skb) {
+#endif
     if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
         void *data;
@@ -435,6 +481,9 @@
         skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
                   SKB_ALLOC_RX, NUMA_NO_NODE);
     }
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    }
+#endif
     if (likely(skb)) {
         skb_reserve(skb, NET_SKB_PAD);
         skb->dev = dev;
@@ -443,6 +492,25 @@
 }
 EXPORT_SYMBOL(__netdev_alloc_skb);
 
+// AVM/TKL: MERGE added unconditionally by Ikanos
+#if defined(CONFIG_MACH_FUSIV)
+struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
+        unsigned int length, gfp_t gfp)
+{
+    struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
+
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+    if (dev && (dev->priv_flags & IFF_NO_IP_ALIGN))
+        return skb;
+#endif
+
+    if (NET_IP_ALIGN && skb)
+        skb_reserve(skb, NET_IP_ALIGN);
+    return skb;
+}
+EXPORT_SYMBOL(__netdev_alloc_skb_ip_align);
+#endif // defined(CONFIG_MACH_FUSIV)
+
 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
              int size, unsigned int truesize)
 {
@@ -474,6 +542,22 @@
 static void skb_free_head(struct sk_buff *skb)
 {
+    /* The code below frees the AP cluster. We don't want to free
+       the cluster if it is in the Tx path, as it will be freed
+       in the AP. For Tx, the pointer will have the value 1.
+     */
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    if(SKB_APBUF_ALLOCATED(skb)) {
+        if(putCluster_ptr != NULL)
+            (*putCluster_ptr)(skb->apAllocAddr);
+        //else
+        //    printk("\n skbuff1: fusivlib_lkm not initialized properly...\n");
+        skb->apAllocAddr = SKB_APBUF_UNALLOCATED;
+    }
+    else if(skb->apAllocAddr == SKB_APBUF_STEAL)
+        skb->apAllocAddr = SKB_APBUF_UNALLOCATED;
+    else
+#endif
     if (skb->head_frag)
         put_page(virt_to_head_page(skb->head));
     else
@@ -520,7 +604,11 @@
     switch (skb->fclone) {
     case SKB_FCLONE_UNAVAILABLE:
+#if defined(CONFIG_AVM_SKB_CACHE_SPLIT)
+        kfree(skb); /*--- find out your special cachep ---*/
+#else/*--- #if defined(CONFIG_AVM_SKB_CACHE_SPLIT) ---*/
         kmem_cache_free(skbuff_head_cache, skb);
+#endif/*--- #else ---*//*--- #if defined(CONFIG_AVM_SKB_CACHE_SPLIT) ---*/
         break;
 
     case SKB_FCLONE_ORIG:
@@ -554,6 +642,9 @@
         WARN_ON(in_irq());
         skb->destructor(skb);
     }
+#if defined(CONFIG_AVM_GENERIC_CONNTRACK)
+    generic_ct_put(skb->generic_ct);
+#endif
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
     nf_conntrack_put(skb->nfct);
 #endif
@@ -589,6 +680,13 @@
 void __kfree_skb(struct sk_buff *skb)
 {
     skb_release_all(skb);
+#if defined(CONFIG_FUSIV_KERNEL_AP_2_AP) || defined(CONFIG_FUSIV_KERNEL_AP_2_AP_MODULE)
+    memset(&skb->apFlowData, 0, sizeof(skb->apFlowData));
+#else
+#if defined(CONFIG_FUSIV_KERNEL_HOST_IPQOS) || defined(CONFIG_FUSIV_KERNEL_HOST_IPQOS_MODULE)
+    memset(&skb->qosInfo, 0, sizeof(skb->qosInfo));
+#endif
+#endif
     kfree_skbmem(skb);
 }
 EXPORT_SYMBOL(__kfree_skb);
@@ -669,6 +767,9 @@
 {
     new->tstamp = old->tstamp;
     new->dev = old->dev;
+#if IS_ENABLED(CONFIG_AVM_NET_SKB_INPUT_DEV)
+    new->input_dev = old->input_dev;
+#endif
     new->transport_header = old->transport_header;
     new->network_header = old->network_header;
     new->mac_header = old->mac_header;
@@ -685,6 +786,10 @@
     new->sp = secpath_get(old->sp);
 #endif
     memcpy(new->cb, old->cb, sizeof(old->cb));
+#ifdef CONFIG_AVM_PA
+    memcpy(&new->avm_pa.pktinfo, &old->avm_pa.pktinfo,
+           sizeof(old->avm_pa.pktinfo));
+#endif
     new->csum = old->csum;
     new->local_df = old->local_df;
     new->pkt_type = old->pkt_type;
@@ -738,12 +843,23 @@
     C(head);
     C(head_frag);
     C(data);
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    C(apAllocAddr);
+#endif
     C(truesize);
     atomic_set(&n->users, 1);
 
     atomic_inc(&(skb_shinfo(skb)->dataref));
     skb->cloned = 1;
 
+#if defined(CONFIG_FUSIV_KERNEL_AP_2_AP) || defined(CONFIG_FUSIV_KERNEL_AP_2_AP_MODULE)
+    C(apFlowData);
+#else
+#if defined(CONFIG_FUSIV_KERNEL_HOST_IPQOS) || defined(CONFIG_FUSIV_KERNEL_HOST_IPQOS_MODULE)
+    C(qosInfo);
+#endif
+#endif
+
     return n;
 #undef C
 }
@@ -857,7 +973,7 @@
     if (skb_pfmemalloc(skb))
         gfp_mask |= __GFP_MEMALLOC;
 
-    n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+    n = kmem_cache_alloc(avm_skb_get_cachep(), gfp_mask);
     if (!n)
         return NULL;
@@ -893,6 +1009,14 @@
     __copy_skb_header(new, old);
 
+#if defined(CONFIG_FUSIV_KERNEL_AP_2_AP) || defined(CONFIG_FUSIV_KERNEL_AP_2_AP_MODULE)
+    memcpy(&new->apFlowData, &old->apFlowData, sizeof(old->apFlowData));
+#else
+#if defined(CONFIG_FUSIV_KERNEL_HOST_IPQOS) || defined(CONFIG_FUSIV_KERNEL_HOST_IPQOS_MODULE)
+    memcpy(&new->qosInfo, &old->qosInfo, sizeof(old->qosInfo));
+#endif
+#endif
+
 #ifndef NET_SKBUFF_DATA_USES_OFFSET
     skb_headers_offset_update(new, offset);
 #endif
@@ -1031,7 +1155,12 @@
     u8 *data;
     int size = nhead + skb_end_offset(skb) + ntail;
     long off;
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    unsigned char* apAllocAddrOld = NULL;
+    int clusterAssigned = 0;
+    data = NULL;
+#endif
     BUG_ON(nhead < 0);
 
     if (skb_shared(skb))
@@ -1041,17 +1170,74 @@
     if (skb_pfmemalloc(skb))
         gfp_mask |= __GFP_MEMALLOC;
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    if( (ap_get_cluster_without_skbrst_ptr != NULL) && (skb->apAllocAddr != SKB_APBUF_UNALLOCATED)) {
+        apAllocAddrOld = skb->apAllocAddr;
+        data = (unsigned char*)(*ap_get_cluster_without_skbrst_ptr)(skb, size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+            SKB_DATA_ALIGN(PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN) - (PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN));
+
+    }
+
+    if (data == NULL)
+    {
+        data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+                       gfp_mask, NUMA_NO_NODE, NULL);
+        if (!data)
+            goto nodata;
+
+        size = SKB_WITH_OVERHEAD(ksize(data));
+    }
+    else
+    {
+        unsigned char *temp;
+        clusterAssigned = 1;
+        temp = skb->apAllocAddr;
+        skb->apAllocAddr = apAllocAddrOld;
+        apAllocAddrOld = temp;
+/*
+Bug:28035
+In case of dual-core Linux mode the clusters are allocated from the CMIPS.
+Although the address is accessible by the HOST, it is not visible to the host
+Linux memory manager. As the amount of memory allocated for a cluster is fixed,
+we return that.
+size = SKB_WITH_OVERHEAD(ksize(data)) will give MAX_APBUF_SIZE minus the size of
+skb_shared_info when the cluster is allocated by the AP.
+*/
+
+#if defined(CONFIG_FUSIV_MIPS_DUALCORE) && !defined(CONFIG_FUSIV_MIPS_CMIPS_CORE)
+        size = SKB_WITH_OVERHEAD(AP_BUFFER_SIZE);
+#else
+        size = SKB_WITH_OVERHEAD(ksize(data));
+#endif
+
+    }
+#else
     data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
                    gfp_mask, NUMA_NO_NODE, NULL);
     if (!data)
         goto nodata;
     size = SKB_WITH_OVERHEAD(ksize(data));
+#endif
+
     /* Copy only real data... and, alas, header. This should be
      * optimized for the cases when header is void.
      */
     memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
-
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    /* When the cluster is allocated by the AP, put skb_shared_info exactly at the
+     * end of the allocated zone and adjust skb->end accordingly.
+     * Size is aligned to a cache line address, so use apAllocAddrOld + size to copy
+     * skb_shared_info to its new location, which is aligned to a cache line address.
+     */
+    if(clusterAssigned == 1){
+        memcpy((struct skb_shared_info *)(apAllocAddrOld + size),
+               skb_shinfo(skb),
+               offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
+    }
+    else
+#endif
     memcpy((struct skb_shared_info *)(data + size),
            skb_shinfo(skb),
            offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
@@ -1093,6 +1279,15 @@
     skb->csum_start += nhead;
     skb->cloned = 0;
     skb->hdr_len = 0;
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP) || IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF)
+    if(clusterAssigned == 1) {
+        skb->apAllocAddr = apAllocAddrOld;
+        /* Adjust skb->end to align to a cache line address */
+        skb->end = apAllocAddrOld + size;
+    }
+    else
+        skb->apAllocAddr = SKB_APBUF_UNALLOCATED;
+#endif
     skb->nohdr = 0;
     atomic_set(&skb_shinfo(skb)->dataref, 1);
     return 0;
@@ -3055,12 +3250,30 @@
 }
 EXPORT_SYMBOL_GPL(skb_gro_receive);
 
+#if defined(CONFIG_AVM_SKB_CACHE_SPLIT)
+struct kmem_cache *skb_cache_create_per_swirq(const char *tasklet_name) {
+    char cache_name[64];
+    snprintf(cache_name, sizeof(cache_name), "skbuff_hc_%s", tasklet_name);
+    return kmem_cache_create(cache_name,
+                 sizeof(struct sk_buff),
+                 0,
+                 SLAB_HWCACHE_ALIGN|SLAB_PANIC
+                 | SLAB_POISON
+                 ,
+                 NULL);
+}
+#endif/*--- #if defined(CONFIG_AVM_SKB_CACHE_SPLIT) ---*/
+
 void __init skb_init(void)
 {
     skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
                           sizeof(struct sk_buff),
                           0,
-                          SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+                          SLAB_HWCACHE_ALIGN|SLAB_PANIC
+#if defined(CONFIG_DEBUG_SLAB_AVM_LITE)
+                          | SLAB_POISON
+#endif/*--- #if defined(CONFIG_DEBUG_SLAB_AVM_LITE) ---*/
+                          ,
                           NULL);
     skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
                         (2*sizeof(struct sk_buff)) +
@@ -3068,6 +3281,9 @@
                         0,
                         SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                         NULL);
+#if defined(CONFIG_AVM_GENERIC_CONNTRACK)
+    generic_ct_init();
+#endif
 }
 
 /**
@@ -3392,7 +3608,11 @@
 {
     if (head_stolen) {
         skb_release_head_state(skb);
+#if defined(CONFIG_AVM_SKB_CACHE_SPLIT)
+        kfree(skb); /*--- find out your special cachep ---*/
+#else/*--- #if defined(CONFIG_AVM_SKB_CACHE_SPLIT) ---*/
         kmem_cache_free(skbuff_head_cache, skb);
+#endif/*--- #else ---*//*--- #if defined(CONFIG_AVM_SKB_CACHE_SPLIT) ---*/
     } else {
         __kfree_skb(skb);
     }
@@ -3477,6 +3697,7 @@
 }
 EXPORT_SYMBOL(skb_try_coalesce);
 
+
 /**
  * skb_gso_transport_seglen - Return length of individual segments of a gso packet
  *
@@ -3501,3 +3722,114 @@
     return shinfo->gso_size;
 }
 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+
+#if IS_ENABLED(CONFIG_FUSIV_ENABLE_AP_MBUF) || IS_ENABLED(CONFIG_FUSIV_ENABLE_MBUF_AP)
+struct sk_buff *ap_get_skb(char *buf, unsigned int size)
+{
+    struct sk_buff *skb = NULL;
+    struct skb_shared_info *shinfo;
+    u8 *data;
+
+    /* The 16-byte size increment is commented out so that a 1728-byte packet
+       can be received into a single skbuff. */
+    //size += 16;
+    /* Get the HEAD */
+    skb = kmem_cache_alloc(avm_skb_get_cachep(), GFP_ATOMIC & ~__GFP_DMA);
+    if (!skb)
+    {
+        printk("phm_devget : alloc_skb() Failed. \n");
+        return NULL;
+    }
+    /* Get the DATA. Size must match skb_add_mtu(). */
+    size = SKB_DATA_ALIGN(size);
+    data = buf;
+    memset(skb, 0, offsetof(struct sk_buff, tail));
+    skb->truesize = SKB_TRUESIZE(size);
+    atomic_set(&skb->users, 1);
+    skb->head = data + PREHDR_SIZE;
+    skb->data = skb->head;
+    skb->end = data + size;
+    skb->end += SKB_DATA_ALIGN(PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN) - (PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN);
+    skb->tail = skb->end;
+    /* make sure we initialize shinfo sequentially */
+    shinfo = skb_shinfo(skb);
+    memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+    atomic_set(&shinfo->dataref, 1);
+    kmemcheck_annotate_variable(shinfo->destructor_arg);
+#if 0
+    shinfo->nr_frags = 0;
+    shinfo->gso_size = 0;
+    shinfo->gso_segs = 0;
+    shinfo->gso_type = 0;
+    shinfo->ip6_frag_id = 0;
+    shinfo->frag_list = NULL;
+#endif
+    skb_reserve(skb, 16);
+    return skb;
+}
+
+static struct sk_buff *ap_get_skb_cluster(unsigned int size, gfp_t gfp_mask)
+{
+    struct skb_shared_info *shinfo;
+    struct sk_buff *skb;
+    u8 *data = NULL;
+
+    /* Get the HEAD */
+    skb = kmem_cache_alloc_node(avm_skb_get_cachep(), gfp_mask & ~__GFP_DMA, NUMA_NO_NODE);
+    if (!skb)
+        goto out;
+    prefetchw(skb);
+
+    /* We do our best to align skb_shared_info on a separate cache
+     * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+     * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+     * Both skb->head and skb_shared_info are cache line aligned.
+     */
+    size = SKB_DATA_ALIGN(size);
+    if( ap_get_cluster_ptr != NULL ) {
+        data = (u8*)(*ap_get_cluster_ptr)(skb, size +
+            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+            SKB_DATA_ALIGN(PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN) - (PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN));
+    }
+    if (!data)
+        goto nodata;
+    //prefetchw(data + size);
+
+    /* Account for allocated memory : skb + skb->head */
+    skb->truesize = SKB_TRUESIZE(size);
+    skb->pfmemalloc = false;
+    atomic_set(&skb->users, 1);
+    skb->head = data;
+    skb->data = data;
+    skb_reset_tail_pointer(skb);
+    skb->end = skb->tail + size;
+    skb->end += SKB_DATA_ALIGN(PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN) - (PREHDR_SIZE + AP_PRE_LEAD_UNUSED_LEN);
+
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+    skb->mac_header = ~0U;
+    skb->transport_header = ~0U;
+#endif
+
+    /* make sure we initialize shinfo sequentially */
+    shinfo = skb_shinfo(skb);
+    memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+
+    atomic_set(&shinfo->dataref, 1);
+    kmemcheck_annotate_variable(shinfo->destructor_arg);
+
+out:
+    return skb;
+nodata:
+#if defined(CONFIG_AVM_SKB_CACHE_SPLIT)
+    kfree(skb); /*--- find out your special cachep ---*/
+#else/*--- #if defined(CONFIG_AVM_SKB_CACHE_SPLIT) ---*/
+    kmem_cache_free(skbuff_head_cache, skb);
+#endif/*--- #else ---*//*--- #if defined(CONFIG_AVM_SKB_CACHE_SPLIT) ---*/
+    skb = NULL;
+    goto out;
+}
+EXPORT_SYMBOL(ap_get_cluster_ptr);
+EXPORT_SYMBOL(ap_get_cluster_without_skbrst_ptr);
+EXPORT_SYMBOL(putCluster_ptr);
+EXPORT_SYMBOL(ap_get_skb);
+#endif
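For context: the patched skbuff.c leaves the Fusiv hooks (ap_get_cluster_ptr, ap_get_cluster_without_skbrst_ptr, putCluster_ptr) as NULL function pointers and exports them; a separate AP driver module (apparently fusivlib_lkm, judging by the commented-out printk) is expected to fill them in at load time. The sketch below is illustrative only and is not part of the patch: it shows how such a module might register and unregister the hooks, assuming only the pointer signatures declared above. The fusiv_hooks_* names and the empty stub bodies are hypothetical placeholders, not the real driver.

/*
 * Illustrative sketch, not part of the patch: registering the skbuff.c
 * cluster hooks from a loadable module. Stubs only; a real driver would
 * back these with the AP cluster pool.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>

/* Hook pointers defined and exported by the patched net/core/skbuff.c. */
extern unsigned char *(*ap_get_cluster_ptr)(struct sk_buff *skbuff, int size);
extern unsigned char *(*ap_get_cluster_without_skbrst_ptr)(struct sk_buff *skbuff, int size);
extern void (*putCluster_ptr)(void *ulPtr);

static unsigned char *fusiv_hooks_get_cluster(struct sk_buff *skb, int size)
{
        /* A real implementation would hand out a buffer from the AP cluster
         * pool and record it in skb->apAllocAddr. Returning NULL makes
         * __alloc_skb() fall back to the normal kmalloc_reserve() path. */
        return NULL;
}

static unsigned char *fusiv_hooks_get_cluster_noreset(struct sk_buff *skb, int size)
{
        /* Same as above but without resetting the skb; this variant is the
         * one pskb_expand_head() calls. */
        return NULL;
}

static void fusiv_hooks_put_cluster(void *cluster)
{
        /* A real implementation would return the cluster to the AP pool;
         * skb_free_head() calls this for SKB_APBUF_ALLOCATED skbs. */
}

static int __init fusiv_hooks_init(void)
{
        ap_get_cluster_ptr = fusiv_hooks_get_cluster;
        ap_get_cluster_without_skbrst_ptr = fusiv_hooks_get_cluster_noreset;
        putCluster_ptr = fusiv_hooks_put_cluster;
        return 0;
}

static void __exit fusiv_hooks_exit(void)
{
        /* Unhook so __alloc_skb() and skb_free_head() fall back to the
         * stock kernel paths once the module is gone. */
        ap_get_cluster_ptr = NULL;
        ap_get_cluster_without_skbrst_ptr = NULL;
        putCluster_ptr = NULL;
}

module_init(fusiv_hooks_init);
module_exit(fusiv_hooks_exit);
MODULE_LICENSE("GPL");

The function-pointer indirection is what lets the core skb allocator stay buildable without the Fusiv driver: every call site in the patch checks the pointer for NULL first, so the hooks can live in a loadable module and be attached or detached at runtime.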