--- zzzz-none-000/linux-4.9.279/net/core/skbuff.c	2021-08-08 06:38:54.000000000 +0000
+++ puma7-atom-6591-750/linux-4.9.279/net/core/skbuff.c	2023-02-08 11:43:43.000000000 +0000
@@ -33,7 +33,13 @@
  */
 
 /*
- *	The functions in this file will not compile correctly with gcc 2.4.x
+ *	The functions in this file will not compile correctly with gcc 2.4.x
+ */
+
+/*
+ * Includes Intel Corporation's changes/modifications dated: 2011, 2015, 2018, 2020.
+ * Changed/modified portions - Copyright (c) 2015-2020, Intel Corporation
+ * 1. Meta Data Extensions for Packet processor.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -78,10 +84,24 @@
 #include 
 #include 
 
+#ifndef CONFIG_ARM_AVALANCHE_SOC
+#include 
+#endif
+
+#include 
+
+int skb_init_intel(struct sk_buff *skb);
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
 EXPORT_SYMBOL(sysctl_max_skb_frags);
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+struct kmem_cache *skbuff_intel_cookie_cache __read_mostly;
+#endif
+
+#ifdef CONFIG_PUMA_LITEPATH
+static struct skb_buf_manager_cb bm_cb;
+#endif
 
 /**
  *	skb_panic - private function for out-of-line support
@@ -174,6 +194,11 @@
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
+	if(skb_init_intel(skb))
+	{
+		kmem_cache_free(skbuff_head_cache, skb);
+		return NULL;
+	}
 	skb->head = NULL;
 	skb->truesize = sizeof(struct sk_buff);
 	atomic_set(&skb->users, 1);
@@ -183,15 +208,117 @@
 	return skb;
 }
 
+#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
+#define DBRIDGE_IFINDEX_CHK(__ifindex, format, args...) \
+{ \
+	if (((__ifindex) < 0) || ((__ifindex) >= TI_MAX_DEVICE_INDEX)) \
+	{ \
+		printk("\n===>>> %s - %d: Currupt " #__ifindex " - %d\n" format, __func__, __LINE__, __ifindex, ##args); \
+		BUG(); \
+	} \
+}
+#endif
+
+#if PUMA7_OR_NEWER_SOC_TYPE
+void __init intel_cache_init(void)
+{
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	skbuff_intel_cookie_cache = kmem_cache_create("skbuff_intel_cookie_cache",
+						      sizeof(SKB_INTEL_COOKIE),
+						      0,
+						      SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+						      NULL);
+#endif
+}
+
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+void* __alloc_skb_intel_cookie(void)
+{
+	SKB_INTEL_COOKIE * cookie_ptr;
+
+	/* Get the HEAD */
+#ifdef CONFIG_AVM_WORKAROUND_DATAPIPE_PROBLEM
+	cookie_ptr = kmem_cache_alloc_node(skbuff_intel_cookie_cache, GFP_ATOMIC|__GFP_ZERO, NUMA_NO_NODE);
+#else
+	cookie_ptr = kmem_cache_alloc_node(skbuff_intel_cookie_cache, GFP_ATOMIC, NUMA_NO_NODE);
+#endif
+	if (!cookie_ptr)
+		return cookie_ptr;
+
+	prefetchw(cookie_ptr);
+	return (void *) cookie_ptr;
+}
+EXPORT_SYMBOL(__alloc_skb_intel_cookie);
+
+void kfree_skb_intel_cookie(struct sk_buff *skb)
+{
+	void *cookie = SKB_GET_COOKIE_P(skb);
+
+	kmem_cache_free(skbuff_intel_cookie_cache, cookie);
+	SKB_GET_COOKIE_P(skb) = NULL;
+}
+EXPORT_SYMBOL(kfree_skb_intel_cookie);
+
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+#endif
+
+/**
+ * skb_init_intel - initialize TI/Intel extensions of sk_buff
+ * @skb: pointer sk_buff structure
+ *
+ * Initializes TI/Intel sk_buff data members. Usually used on just
+ * allocated buffers. currently from __alloc_skb and build_skb
+ */
+int skb_init_intel(struct sk_buff *skb)
+{
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+#if PUMA7_OR_NEWER_SOC_TYPE
+	SKB_GET_COOKIE_P(skb) = __alloc_skb_intel_cookie();
+	if (!SKB_GET_COOKIE_P(skb))
+	{
+		return -1;
+	}
+#endif
+#endif
+#ifdef CONFIG_TI_META_DATA
+	skb->ti_meta_info = 0;
+	skb->ti_meta_info2= 0;
+	skb->ti_ds_traffic_prio = 0;
+#endif /* CONFIG_TI_META_DATA */
+#ifdef CONFIG_INTEL_NF_GWMETA_SUPPORT
+	skb->ti_gw_meta= 0;
+#endif /* INTEL_NF_GWMETA_SUPPORT */
+#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
+	skb->ti_docsis_input_dev = NULL;
+#endif /* CONFIG_TI_DOCSIS_INPUT_DEV */
+#ifdef CONFIG_INTEL_DOCSIS_ICMP_IIF
+	skb->docsis_icmp_iif = 0;
+#endif /* CONFIG_INTEL_DOCSIS_ICMP_IIF */
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+	skb->ti_selective_fwd_dev_info = 0;
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
+
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	memset((void *)SKB_GET_COOKIE_P(skb), 0, sizeof(SKB_INTEL_COOKIE));
+	SKB_GET_PP_INFO_P(skb)->egress_queue = TI_PPM_EGRESS_QUEUE_INVALID;
+#ifndef CONFIG_MACH_PUMA5
+	SKB_GET_PP_INFO_P(skb)->pp_session.session_handle = AVALANCHE_PP_INVALID_SESSION; /* Set invalid session */
+#endif
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+
+	return 0;
+}
+
+
 /**
- *	__alloc_skb	-	allocate a network buffer
- *	@size: size to allocate
- *	@gfp_mask: allocation mask
- *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
- *		instead of head cache and allocate a cloned (child) skb.
- *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
- *		allocations in case the data is required for writeback
- *	@node: numa node to allocate memory on
+ * __alloc_skb - allocate a network buffer
+ * @size: size to allocate
+ * @gfp_mask: allocation mask
+ * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
+ *	instead of head cache and allocate a cloned (child) skb.
+ *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
+ *	allocations in case the data is required for writeback
+ * @node: numa node to allocate memory on
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of at least size bytes. The object has a reference count
@@ -203,26 +330,28 @@
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 			    int flags, int node)
 {
-	struct kmem_cache *cache;
-	struct skb_shared_info *shinfo;
-	struct sk_buff *skb;
-	u8 *data;
-	bool pfmemalloc;
-
-	cache = (flags & SKB_ALLOC_FCLONE)
-		? skbuff_fclone_cache : skbuff_head_cache;
+	struct kmem_cache *cache;
+	struct skb_shared_info *shinfo;
+	struct sk_buff *skb;
+	u8 *data;
+	bool pfmemalloc;
+
+	cache = (flags & SKB_ALLOC_FCLONE)
+		? skbuff_fclone_cache : skbuff_head_cache;
+
+	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
+		gfp_mask |= __GFP_MEMALLOC;
+
+	/* Get the HEAD */
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
+	if (!skb)
+		goto out;
+	prefetchw(skb);
 
-	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
-		gfp_mask |= __GFP_MEMALLOC;
-
-	/* Get the HEAD */
-	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
-	if (!skb)
-		goto out;
-	prefetchw(skb);
+
-	/* We do our best to align skb_shared_info on a separate cache
-	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+	/* We do our best to align skb_shared_info on a separate cache
+	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
 	 * Both skb->head and skb_shared_info are cache line aligned.
 	 */
@@ -244,6 +373,9 @@
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
+#ifdef CONFIG_AVM_NET_DEBUG_SKBUFF_LEAK
+	skb_track_caller(skb);
+#endif
 	/* Account for allocated memory : skb + skb->head */
 	skb->truesize = SKB_TRUESIZE(size);
 	skb->pfmemalloc = pfmemalloc;
@@ -255,11 +387,16 @@
 	skb->mac_header = (typeof(skb->mac_header))~0U;
 	skb->transport_header = (typeof(skb->transport_header))~0U;
 
-	/* make sure we initialize shinfo sequentially */
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-	kmemcheck_annotate_variable(shinfo->destructor_arg);
+	/* make sure we initialize shinfo sequentially */
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+	kmemcheck_annotate_variable(shinfo->destructor_arg);
+
+	if(skb_init_intel(skb))
+	{
+		goto no_ppinfo;
+	}
 
 	if (flags & SKB_ALLOC_FCLONE) {
 		struct sk_buff_fclones *fclones;
@@ -270,11 +407,17 @@
 		skb->fclone = SKB_FCLONE_ORIG;
 		atomic_set(&fclones->fclone_ref, 1);
 
+		if(skb_init_intel(&fclones->skb2))
+		{
+			goto no_ppinfo;
+		}
 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 		fclones->skb2.pfmemalloc = pfmemalloc;
 	}
 out:
-	return skb;
+	return skb;
+no_ppinfo:
+	kfree(data);
 nodata:
 	kmem_cache_free(cache, skb);
 	skb = NULL;
@@ -329,7 +472,13 @@
 	atomic_set(&shinfo->dataref, 1);
 	kmemcheck_annotate_variable(shinfo->destructor_arg);
 
-	return skb;
+	if(skb_init_intel(skb))
+	{
+		kmem_cache_free(skbuff_head_cache, skb);
+		return NULL;
+	}
+
+	return skb;
 }
 
 /* build_skb() is wrapper over __build_skb(), that specifically
@@ -356,6 +505,9 @@
 	struct page_frag_cache page;
 	size_t skb_count;
 	void *skb_cache[NAPI_SKB_CACHE_SIZE];
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+	void *skb_cookie[NAPI_SKB_CACHE_SIZE];
+#endif
 };
 
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
@@ -586,7 +738,12 @@
 static void skb_free_head(struct sk_buff *skb)
 {
 	unsigned char *head = skb->head;
-
+#ifdef CONFIG_PUMA_LITEPATH
+	if (bm_cb.is_bm_skb && bm_cb.is_bm_skb(skb)) {
+		bm_cb.bm_free_skb(skb);
+		return;
+	}
+#endif
 	if (skb->head_frag)
 		skb_free_frag(head);
 	else
@@ -633,6 +790,9 @@
 
 	switch (skb->fclone) {
 	case SKB_FCLONE_UNAVAILABLE:
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+		kfree_skb_intel_cookie(skb);
+#endif
 		kmem_cache_free(skbuff_head_cache, skb);
 		return;
 
@@ -654,6 +814,10 @@
 
 	if (!atomic_dec_and_test(&fclones->fclone_ref))
 		return;
 fastpath:
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+	kfree_skb_intel_cookie(&fclones->skb1);
+	kfree_skb_intel_cookie(&fclones->skb2);
+#endif
 	kmem_cache_free(skbuff_fclone_cache, fclones);
 }
@@ -670,6 +834,9 @@
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	nf_conntrack_put(skb->nfct);
 #endif
+#if IS_ENABLED(CONFIG_AVM_PA_GENERIC_CT)
+	generic_ct_put(SKB_GENERIC_CT(skb));
+#endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	nf_bridge_put(skb->nf_bridge);
 #endif
@@ -779,6 +946,10 @@
 	if (nc->skb_count) {
 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 				     nc->skb_cache);
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+		kmem_cache_free_bulk(skbuff_intel_cookie_cache, nc->skb_count,
+				     nc->skb_cookie);
+#endif
 		nc->skb_count = 0;
 	}
 }
@@ -791,6 +962,9 @@
 	skb_release_all(skb);
 
 	/* record skb to CPU local list */
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+	nc->skb_cookie[nc->skb_count] = SKB_GET_COOKIE_P(skb);
+#endif
 	nc->skb_cache[nc->skb_count++] = skb;
 
 #ifdef CONFIG_SLUB
@@ -802,6 +976,10 @@
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
 				     nc->skb_cache);
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+		kmem_cache_free_bulk(skbuff_intel_cookie_cache, NAPI_SKB_CACHE_SIZE,
+				     nc->skb_cookie);
+#endif
 		nc->skb_count = 0;
 	}
 }
@@ -895,7 +1073,6 @@
 	CHECK_SKB_FIELD(tc_verd);
 #endif
 #endif
-
 }
 
 /*
@@ -929,7 +1106,40 @@
 	atomic_inc(&(skb_shinfo(skb)->dataref));
 	skb->cloned = 1;
-
+#if IS_ENABLED(CONFIG_DIRECTCONNECT_DP_API)
+	/* used by directconnect peripherals to store session info */
+	n->DW0 = 0;
+	n->DW1 = 0;
+	n->DW2 = 0;
+	n->DW3 = 0;
+#endif
+#ifdef CONFIG_TI_META_DATA
+	C(ti_meta_info);
+	C(ti_meta_info2);
+	C(ti_ds_traffic_prio);
+#endif /* CONFIG_TI_META_DATA */
+#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
+	C(ti_docsis_input_dev);
+	if (n->ti_docsis_input_dev)
+	{
+		DBRIDGE_IFINDEX_CHK(n->ti_docsis_input_dev->ifindex, "dev %p, devname %s, ti_docsis_input_dev %p, ti_docsis_input_dev->name %s", n->dev, n->dev ? n->dev->name : NULL, n->ti_docsis_input_dev, n->ti_docsis_input_dev->name);
+	}
+#endif /* CONFIG_TI_DOCSIS_INPUT_DEV */
+#ifdef CONFIG_INTEL_DOCSIS_ICMP_IIF
+	C(docsis_icmp_iif);
+#endif /* CONFIG_INTEL_DOCSIS_ICMP_IIF */
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+	C(ti_selective_fwd_dev_info);
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	memcpy((void *)SKB_GET_PP_INFO_P(n), (void *)SKB_GET_PP_INFO_P(skb), sizeof(*SKB_GET_PP_INFO_P(skb)));
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+	/* Not to be copied by __copy_skb_header(). __copy_skb_header() is used
+	 * during segmentation. Copies created by that function may not inherit
+	 * the same pkt_info because avm_pa cannot tell them apart.
+	 */
+	if (IS_ENABLED(CONFIG_AVM_PA))
+		memcpy(AVM_PKT_INFO(n), AVM_PKT_INFO(skb), sizeof(struct avm_pa_pkt_info));
 	return n;
 #undef C
 }
@@ -1048,6 +1258,11 @@
 		if (!n)
 			return NULL;
 
+		if(skb_init_intel(n))
+		{
+			kmem_cache_free(skbuff_head_cache, n);
+			return NULL;
+		}
 		kmemcheck_annotate_bitfield(n, flags1);
 		n->fclone = SKB_FCLONE_UNAVAILABLE;
 	}
@@ -1078,6 +1293,36 @@
 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
+#ifdef CONFIG_TI_META_DATA
+	new->ti_meta_info = old->ti_meta_info;
+	new->ti_meta_info2 = old->ti_meta_info2;
+	new->ti_ds_traffic_prio = old->ti_ds_traffic_prio;
+#endif /* CONFIG_TI_META_DATA */
+#ifdef CONFIG_INTEL_NF_GWMETA_SUPPORT
+	new->ti_gw_meta = old->ti_gw_meta;
+#endif /* INTEL_NF_GWMETA_SUPPORT */
+#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
+	new->ti_docsis_input_dev = old->ti_docsis_input_dev ;
+	if (new->ti_docsis_input_dev)
+	{
+		DBRIDGE_IFINDEX_CHK(new->ti_docsis_input_dev->ifindex, "dev %p, devname %s, ti_docsis_input_dev %p, ti_docsis_input_dev->name %s", new->dev, new->dev ? new->dev->name : NULL, new->ti_docsis_input_dev, new->ti_docsis_input_dev->name);
+	}
+#endif /* CONFIG_TI_DOCSIS_INPUT_DEV */
+#ifdef CONFIG_INTEL_DOCSIS_ICMP_IIF
+	new->docsis_icmp_iif = old->docsis_icmp_iif;
+#endif /* CONFIG_INTEL_DOCSIS_ICMP_IIF */
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+	new->ti_selective_fwd_dev_info = old->ti_selective_fwd_dev_info;
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	memcpy((void *)SKB_GET_PP_INFO_P(new), (void *)SKB_GET_PP_INFO_P(old), sizeof(*SKB_GET_PP_INFO_P(old)));
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+	/* Not to be copied by __copy_skb_header(). __copy_skb_header() is used
+	 * during segmentation. Copies created by that function may not inherit
+	 * the same pkt_info because avm_pa cannot tell them apart.
+	 */
+	if (IS_ENABLED(CONFIG_AVM_PA))
+		memcpy(AVM_PKT_INFO(new), AVM_PKT_INFO(old), sizeof(struct avm_pa_pkt_info));
 }
 
 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
@@ -1275,6 +1520,12 @@
 	skb->hdr_len = 0;
 	skb->nohdr = 0;
 	atomic_set(&skb_shinfo(skb)->dataref, 1);
+
+#ifdef CONFIG_PUMA_LITEPATH
+	if (bm_cb.is_bm_skb && bm_cb.is_bm_skb(skb))
+		bm_cb.bm_clear_skb(skb);
+#endif
+
 	return 0;
 
 nofrags:
@@ -3551,6 +3802,12 @@
 						0,
 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 						NULL);
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+	intel_cache_init();
+#endif
+#if IS_ENABLED(CONFIG_AVM_PA_GENERIC_CT)
+	generic_ct_init();
+#endif
 }
 
 static int
@@ -4362,10 +4619,16 @@
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 {
-	if (head_stolen) {
+	if (head_stolen)
+	{
 		skb_release_head_state(skb);
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+		kfree_skb_intel_cookie(skb);
+#endif
 		kmem_cache_free(skbuff_head_cache, skb);
-	} else {
+	}
+	else
+	{
 		__kfree_skb(skb);
 	}
 }
@@ -4471,7 +4734,10 @@
 	skb->ignore_df = 0;
 	skb_dst_drop(skb);
 	secpath_reset(skb);
-	nf_reset(skb);
+	/* TMA/MQU 20170411: Is this the right thing for namespace
+	 * changes? We think so. See JZ-30001.
+	 */
+	nf_reset_no_generic_ct(skb);
 	nf_reset_trace(skb);
 
 #ifdef CONFIG_NET_SWITCHDEV
@@ -4648,6 +4914,28 @@
 }
 EXPORT_SYMBOL(skb_vlan_untag);
 
+#ifdef CONFIG_PUMA_LITEPATH
+int skb_register_buf_manager(struct skb_buf_manager_cb *cb)
+{
+	if (bm_cb.is_bm_skb) {
+		pr_err("buffer manager already registered\n");
+		return 1;
+	}
+
+	pr_info("skb buffer manager registered\n");
+	memcpy(&bm_cb, cb, sizeof(bm_cb));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(skb_register_buf_manager);
+
+void skb_unregister_buf_manager(void)
+{
+	pr_info("skb buffer manager unregistered\n");
+	memset(&bm_cb, 0, sizeof(bm_cb));
+}
+EXPORT_SYMBOL_GPL(skb_unregister_buf_manager);
+#endif
+
 int skb_ensure_writable(struct sk_buff *skb, int write_len)
 {
 	if (!pskb_may_pull(skb, write_len))