--- zzzz-none-000/linux-5.15.111/net/core/skbuff.c	2023-05-11 14:00:40.000000000 +0000
+++ puma7-atom-6670-761/linux-5.15.111/net/core/skbuff.c	2024-02-07 10:23:30.000000000 +0000
@@ -29,7 +29,13 @@
  */
 
 /*
- *	The functions in this file will not compile correctly with gcc 2.4.x
+ *	The functions in this file will not compile correctly with gcc 2.4.x
+ */
+
+/*
+ * Includes Intel Corporation's changes/modifications dated: 2011, 2015, 2018, 2020.
+ * Changed/modified portions - Copyright (c) 2015-2020, Intel Corporation
+ * 1. Meta Data Extensions for Packet processor.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -82,13 +88,28 @@
 #include "datagram.h"
 #include "sock_destructor.h"
 
+#include 
+
 struct kmem_cache *skbuff_head_cache __ro_after_init;
 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
 #ifdef CONFIG_SKB_EXTENSIONS
 static struct kmem_cache *skbuff_ext_cache __ro_after_init;
 #endif
+
+#ifndef CONFIG_ARM_AVALANCHE_SOC
+#include 
+#endif
+
+int skb_init_intel(struct sk_buff *skb);
 
 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
 EXPORT_SYMBOL(sysctl_max_skb_frags);
 
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+struct kmem_cache *skbuff_intel_cookie_cache __read_mostly;
+#endif
+
+#ifdef CONFIG_PUMA_LITEPATH
+static struct skb_buf_manager_cb bm_cb;
+#endif
 
 /**
  *	skb_panic - private function for out-of-line support
@@ -130,6 +151,9 @@
 	struct page_frag_cache page;
 	unsigned int skb_count;
 	void *skb_cache[NAPI_SKB_CACHE_SIZE];
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+	void *skb_cookie[NAPI_SKB_CACHE_SIZE];
+#endif
 };
 
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
@@ -243,8 +267,15 @@
 		return NULL;
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
+
 	__build_skb_around(skb, data, frag_size);
 
+	if(skb_init_intel(skb))
+	{
+		kmem_cache_free(skbuff_head_cache, skb);
+		return NULL;
+	}
+
 	return skb;
 }
 
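/*
 * A minimal usage sketch of the build_skb() change above: callers must
 * already handle a NULL return, so the new skb_init_intel() failure path
 * needs no driver-side changes. The receive helper below is illustrative
 * only and is not part of the patch.
 */
#include <linux/skbuff.h>

static struct sk_buff *rx_build_example(void *data)
{
	/* build_skb() now also returns NULL when the Intel cookie cannot
	 * be allocated, so one error path covers both failure modes. */
	struct sk_buff *skb = build_skb(data, PAGE_SIZE);

	if (!skb) {
		skb_free_frag(data);	/* reclaim the buffer ourselves */
		return NULL;
	}
	return skb;
}
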
@@ -376,15 +407,117 @@
  *
  */
 
+#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
+#define DBRIDGE_IFINDEX_CHK(__ifindex, format, args...) \
+{ \
+	if (((__ifindex) < 0) || ((__ifindex) >= TI_MAX_DEVICE_INDEX)) \
+	{ \
+		printk("\n===>>> %s - %d: Corrupt " #__ifindex " - %d\n" format, __func__, __LINE__, __ifindex, ##args); \
+		BUG(); \
+	} \
+}
+#endif
+
+#if PUMA7_OR_NEWER_SOC_TYPE
+void __init intel_cache_init(void)
+{
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	skbuff_intel_cookie_cache = kmem_cache_create("skbuff_intel_cookie_cache",
+						      sizeof(SKB_INTEL_COOKIE),
+						      0,
+						      SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+						      NULL);
+#endif
+}
+
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+void *__alloc_skb_intel_cookie(void)
+{
+	SKB_INTEL_COOKIE *cookie_ptr;
+
+	/* Get the HEAD */
+#ifdef CONFIG_AVM_WORKAROUND_DATAPIPE_PROBLEM
+	cookie_ptr = kmem_cache_alloc_node(skbuff_intel_cookie_cache, GFP_ATOMIC|__GFP_ZERO, NUMA_NO_NODE);
+#else
+	cookie_ptr = kmem_cache_alloc_node(skbuff_intel_cookie_cache, GFP_ATOMIC, NUMA_NO_NODE);
+#endif
+	if (!cookie_ptr)
+		return cookie_ptr;
+
+	prefetchw(cookie_ptr);
+	return (void *)cookie_ptr;
+}
+EXPORT_SYMBOL(__alloc_skb_intel_cookie);
+
+void kfree_skb_intel_cookie(struct sk_buff *skb)
+{
+	void *cookie = SKB_GET_COOKIE_P(skb);
+
+	kmem_cache_free(skbuff_intel_cookie_cache, cookie);
+	SKB_GET_COOKIE_P(skb) = NULL;
+}
+EXPORT_SYMBOL(kfree_skb_intel_cookie);
+
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+#endif
+
 /**
- *	__alloc_skb	-	allocate a network buffer
- *	@size: size to allocate
- *	@gfp_mask: allocation mask
- *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
- *		instead of head cache and allocate a cloned (child) skb.
- *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
- *		allocations in case the data is required for writeback
- *	@node: numa node to allocate memory on
+ * skb_init_intel - initialize TI/Intel extensions of sk_buff
+ * @skb: pointer to sk_buff structure
+ *
+ * Initializes TI/Intel sk_buff data members. Usually used on just
+ * allocated buffers, currently from __alloc_skb and build_skb.
+ */
+int skb_init_intel(struct sk_buff *skb)
+{
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+#if PUMA7_OR_NEWER_SOC_TYPE
+	SKB_GET_COOKIE_P(skb) = __alloc_skb_intel_cookie();
+	if (!SKB_GET_COOKIE_P(skb))
+	{
+		return -1;
+	}
+#endif
+#endif
+#ifdef CONFIG_TI_META_DATA
+	skb->ti_meta_info = 0;
+	skb->ti_meta_info2 = 0;
+	skb->ti_ds_traffic_prio = 0;
+#endif /* CONFIG_TI_META_DATA */
+#ifdef CONFIG_INTEL_NF_GWMETA_SUPPORT
+	skb->ti_gw_meta = 0;
+#endif /* CONFIG_INTEL_NF_GWMETA_SUPPORT */
+#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
+	skb->ti_docsis_input_dev = NULL;
+#endif /* CONFIG_TI_DOCSIS_INPUT_DEV */
+#ifdef CONFIG_INTEL_DOCSIS_ICMP_IIF
+	skb->docsis_icmp_iif = 0;
+#endif /* CONFIG_INTEL_DOCSIS_ICMP_IIF */
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+	skb->ti_selective_fwd_dev_info = 0;
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
+
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	memset((void *)SKB_GET_COOKIE_P(skb), 0, sizeof(SKB_INTEL_COOKIE));
+	SKB_GET_PP_INFO_P(skb)->egress_queue = TI_PPM_EGRESS_QUEUE_INVALID;
+#ifndef CONFIG_MACH_PUMA5
+	SKB_GET_PP_INFO_P(skb)->pp_session.session_handle = AVALANCHE_PP_INVALID_SESSION; /* Set invalid session */
+#endif
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+
+	return 0;
+}
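/*
 * Sketch of the out-of-line cookie pattern that skb_init_intel() relies on.
 * SKB_INTEL_COOKIE and SKB_GET_COOKIE_P() come from Intel headers that are
 * not part of this patch; demo_cookie and demo_pkt below are stand-ins that
 * only illustrate the alloc/zero/free pairing, not the real layout.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_cookie {		/* stand-in for SKB_INTEL_COOKIE */
	u32 egress_queue;
	u32 session_handle;
};

struct demo_pkt {
	void *cookie;		/* plays the role of SKB_GET_COOKIE_P(skb) */
};

static struct kmem_cache *demo_cookie_cache;

static int __init demo_cookie_cache_init(void)
{
	demo_cookie_cache = kmem_cache_create("demo_cookie",
					      sizeof(struct demo_cookie),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	return demo_cookie_cache ? 0 : -ENOMEM;
}

static int demo_attach_cookie(struct demo_pkt *p)
{
	p->cookie = kmem_cache_alloc(demo_cookie_cache, GFP_ATOMIC);
	if (!p->cookie)
		return -1;	/* skb_init_intel() reports failure the same way */
	memset(p->cookie, 0, sizeof(struct demo_cookie));	/* invalidate all fields */
	return 0;
}

static void demo_detach_cookie(struct demo_pkt *p)
{
	kmem_cache_free(demo_cookie_cache, p->cookie);
	p->cookie = NULL;	/* mirrors kfree_skb_intel_cookie() */
}
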
+
+
+/**
+ *	__alloc_skb	-	allocate a network buffer
+ *	@size: size to allocate
+ *	@gfp_mask: allocation mask
+ *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
+ *		instead of head cache and allocate a cloned (child) skb.
+ *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
+ *		allocations in case the data is required for writeback
+ *	@node: numa node to allocate memory on
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of at least size bytes. The object has a reference count
@@ -440,9 +573,17 @@
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
+#ifdef CONFIG_AVM_NET_DEBUG_SKBUFF_LEAK
+	skb_track_caller(skb);
+#endif
 	__build_skb_around(skb, data, 0);
 	skb->pfmemalloc = pfmemalloc;
 
+	if(skb_init_intel(skb))
+	{
+		goto no_ppinfo;
+	}
+
 	if (flags & SKB_ALLOC_FCLONE) {
 		struct sk_buff_fclones *fclones;
 
@@ -451,11 +592,17 @@
 		skb->fclone = SKB_FCLONE_ORIG;
 		refcount_set(&fclones->fclone_ref, 1);
 
+		if(skb_init_intel(&fclones->skb2))
+		{
+			goto no_ppinfo;
+		}
 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
 
 	return skb;
 
+no_ppinfo:
+	kfree(data);
 nodata:
 	kmem_cache_free(cache, skb);
 	return NULL;
@@ -647,6 +794,13 @@
 {
 	unsigned char *head = skb->head;
 
+#ifdef CONFIG_PUMA_LITEPATH
+	if (bm_cb.is_bm_skb && bm_cb.is_bm_skb(skb)) {
+		bm_cb.bm_free_skb(skb);
+		return;
+	}
+#endif
+
 	if (skb->head_frag) {
 		if (skb_pp_recycle(skb, head))
 			return;
@@ -697,6 +851,9 @@
 
 	switch (skb->fclone) {
 	case SKB_FCLONE_UNAVAILABLE:
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+		kfree_skb_intel_cookie(skb);
+#endif
 		kmem_cache_free(skbuff_head_cache, skb);
 		return;
 
@@ -718,6 +875,10 @@
 	if (!refcount_dec_and_test(&fclones->fclone_ref))
 		return;
 fastpath:
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+	kfree_skb_intel_cookie(&fclones->skb1);
+	kfree_skb_intel_cookie(&fclones->skb2);
+#endif
 	kmem_cache_free(skbuff_fclone_cache, fclones);
 }
 
@@ -732,6 +893,9 @@
 	nf_conntrack_put(skb_nfct(skb));
 #endif
 	skb_ext_put(skb);
+#if defined(CONFIG_AVM_PA_GENERIC_CT)
+	generic_ct_put(SKB_GENERIC_CT(skb));
+#endif
 }
 
 /* Free everything but the sk_buff shell. */
@@ -936,6 +1100,9 @@
 	u32 i;
 
 	kasan_poison_object_data(skbuff_head_cache, skb);
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+	nc->skb_cookie[nc->skb_count] = SKB_GET_COOKIE_P(skb);
+#endif
 	nc->skb_cache[nc->skb_count++] = skb;
 
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
@@ -945,6 +1112,10 @@
 
 		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
 				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+		kmem_cache_free_bulk(skbuff_intel_cookie_cache, NAPI_SKB_CACHE_HALF,
+				     nc->skb_cookie + NAPI_SKB_CACHE_HALF);
+#endif
 		nc->skb_count = NAPI_SKB_CACHE_HALF;
 	}
 }
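/*
 * The per-CPU NAPI cache now keeps skb_cookie[] index-parallel to
 * skb_cache[], so a second kmem_cache_free_bulk() can return the cookies
 * of the same half-batch in one call. A condensed sketch of that
 * invariant, with the arrays shrunk and the caches renamed for
 * readability; none of the demo_* names exist in the patch.
 */
#include <linux/slab.h>

#define DEMO_CACHE_SIZE	8
#define DEMO_CACHE_HALF	(DEMO_CACHE_SIZE / 2)

struct demo_napi_cache {
	unsigned int count;
	void *skb[DEMO_CACHE_SIZE];
	void *cookie[DEMO_CACHE_SIZE];	/* cookie[i] belongs to skb[i] */
};

static struct kmem_cache *demo_skb_cache, *demo_pkt_cookie_cache;

static void demo_cache_put(struct demo_napi_cache *nc, void *skb, void *cookie)
{
	/* Store the cookie at the same index before count is bumped, so
	 * the two bulk frees below always see matching pairs. */
	nc->cookie[nc->count] = cookie;
	nc->skb[nc->count++] = skb;

	if (nc->count == DEMO_CACHE_SIZE) {
		/* Flush the upper half of both arrays; the lower half
		 * stays cached, exactly as napi_skb_cache_put() does. */
		kmem_cache_free_bulk(demo_skb_cache, DEMO_CACHE_HALF,
				     nc->skb + DEMO_CACHE_HALF);
		kmem_cache_free_bulk(demo_pkt_cookie_cache, DEMO_CACHE_HALF,
				     nc->cookie + DEMO_CACHE_HALF);
		nc->count = DEMO_CACHE_HALF;
	}
}
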
@@ -1081,7 +1252,36 @@
 	atomic_inc(&(skb_shinfo(skb)->dataref));
 	skb->cloned = 1;
-
+#if IS_ENABLED(CONFIG_DIRECTCONNECT_DP_API)
+	/* used by directconnect peripherals to store session info */
+	n->DW0 = 0;
+	n->DW1 = 0;
+	n->DW2 = 0;
+	n->DW3 = 0;
+#endif
+#ifdef CONFIG_TI_META_DATA
+	C(ti_meta_info);
+	C(ti_meta_info2);
+	C(ti_ds_traffic_prio);
+#endif /* CONFIG_TI_META_DATA */
+#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
+	C(ti_docsis_input_dev);
+	if (n->ti_docsis_input_dev)
+	{
+		DBRIDGE_IFINDEX_CHK(n->ti_docsis_input_dev->ifindex, "dev %p, devname %s, ti_docsis_input_dev %p, ti_docsis_input_dev->name %s", n->dev, n->dev ? n->dev->name : NULL, n->ti_docsis_input_dev, n->ti_docsis_input_dev->name);
+	}
+#endif /* CONFIG_TI_DOCSIS_INPUT_DEV */
+#ifdef CONFIG_INTEL_DOCSIS_ICMP_IIF
+	C(docsis_icmp_iif);
+#endif /* CONFIG_INTEL_DOCSIS_ICMP_IIF */
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+	C(ti_selective_fwd_dev_info);
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	memcpy((void *)SKB_GET_PP_INFO_P(n), (void *)SKB_GET_PP_INFO_P(skb), sizeof(*SKB_GET_PP_INFO_P(skb)));
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
+	if (IS_ENABLED(CONFIG_AVM_PA))
+		memcpy(AVM_PKT_INFO(n), AVM_PKT_INFO(skb), sizeof(struct avm_pa_pkt_info));
 	return n;
 #undef C
 }
@@ -1524,6 +1724,11 @@
 		if (!n)
 			return NULL;
 
+		if(skb_init_intel(n))
+		{
+			kmem_cache_free(skbuff_head_cache, n);
+			return NULL;
+		}
 		n->fclone = SKB_FCLONE_UNAVAILABLE;
 	}
 
@@ -1554,6 +1759,30 @@
 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
+#ifdef CONFIG_TI_META_DATA
+	new->ti_meta_info = old->ti_meta_info;
+	new->ti_meta_info2 = old->ti_meta_info2;
+	new->ti_ds_traffic_prio = old->ti_ds_traffic_prio;
+#endif /* CONFIG_TI_META_DATA */
+#ifdef CONFIG_INTEL_NF_GWMETA_SUPPORT
+	new->ti_gw_meta = old->ti_gw_meta;
+#endif /* CONFIG_INTEL_NF_GWMETA_SUPPORT */
+#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
+	new->ti_docsis_input_dev = old->ti_docsis_input_dev;
+	if (new->ti_docsis_input_dev)
+	{
+		DBRIDGE_IFINDEX_CHK(new->ti_docsis_input_dev->ifindex, "dev %p, devname %s, ti_docsis_input_dev %p, ti_docsis_input_dev->name %s", new->dev, new->dev ? new->dev->name : NULL, new->ti_docsis_input_dev, new->ti_docsis_input_dev->name);
	}
+#endif /* CONFIG_TI_DOCSIS_INPUT_DEV */
+#ifdef CONFIG_INTEL_DOCSIS_ICMP_IIF
+	new->docsis_icmp_iif = old->docsis_icmp_iif;
+#endif /* CONFIG_INTEL_DOCSIS_ICMP_IIF */
+#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
+	new->ti_selective_fwd_dev_info = old->ti_selective_fwd_dev_info;
+#endif /* CONFIG_TI_L2_SELECTIVE_FORWARDER */
+#ifdef CONFIG_TI_PACKET_PROCESSOR
+	memcpy((void *)SKB_GET_PP_INFO_P(new), (void *)SKB_GET_PP_INFO_P(old), sizeof(*SKB_GET_PP_INFO_P(old)));
+#endif /* CONFIG_TI_PACKET_PROCESSOR */
 }
 EXPORT_SYMBOL(skb_copy_header);
 
@@ -1761,6 +1990,11 @@
 	if (!skb->sk || skb->destructor == sock_edemux)
 		skb->truesize += size - osize;
 
+#ifdef CONFIG_PUMA_LITEPATH
+	if (bm_cb.is_bm_skb && bm_cb.is_bm_skb(skb))
+		bm_cb.bm_clear_skb(skb);
+#endif
+
 	return 0;
 
 nofrags:
@@ -4517,6 +4751,14 @@
 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 						NULL);
 	skb_extensions_init();
+
+#if defined(CONFIG_AVM_PA_GENERIC_CT)
+	generic_ct_init();
+#endif
+
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+	intel_cache_init();
+#endif
 }
 
 static int
@@ -5373,10 +5615,16 @@
 
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 {
-	if (head_stolen) {
+	if (head_stolen)
+	{
 		skb_release_head_state(skb);
+#if defined(PUMA7_OR_NEWER_SOC_TYPE) && defined(CONFIG_TI_PACKET_PROCESSOR)
+		kfree_skb_intel_cookie(skb);
+#endif
 		kmem_cache_free(skbuff_head_cache, skb);
-	} else {
+	}
+	else
+	{
 		__kfree_skb(skb);
 	}
 }
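/*
 * The same block of TI/Intel field copies now appears twice, in
 * __skb_clone() and in skb_copy_header(). A hypothetical helper such as
 * the one below could keep both sites in sync; the fields are the ones
 * this patch adds to struct sk_buff, but the helper itself is not part of
 * the patch.
 */
static inline void skb_copy_intel_meta(struct sk_buff *new,
				       struct sk_buff *old)
{
#ifdef CONFIG_TI_META_DATA
	new->ti_meta_info = old->ti_meta_info;
	new->ti_meta_info2 = old->ti_meta_info2;
	new->ti_ds_traffic_prio = old->ti_ds_traffic_prio;
#endif
#ifdef CONFIG_TI_DOCSIS_INPUT_DEV
	new->ti_docsis_input_dev = old->ti_docsis_input_dev;
#endif
#ifdef CONFIG_INTEL_DOCSIS_ICMP_IIF
	new->docsis_icmp_iif = old->docsis_icmp_iif;
#endif
#ifdef CONFIG_TI_L2_SELECTIVE_FORWARDER
	new->ti_selective_fwd_dev_info = old->ti_selective_fwd_dev_info;
#endif
#ifdef CONFIG_TI_PACKET_PROCESSOR
	/* The packet-processor info lives in the out-of-line cookie, so
	 * the copy goes through both cookies, not the skbs themselves. */
	memcpy((void *)SKB_GET_PP_INFO_P(new), (void *)SKB_GET_PP_INFO_P(old),
	       sizeof(*SKB_GET_PP_INFO_P(old)));
#endif
}
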
@@ -5500,7 +5748,10 @@
 	skb->ignore_df = 0;
 	skb_dst_drop(skb);
 	skb_ext_reset(skb);
-	nf_reset_ct(skb);
+	/* TMA/MQU 20170411: Is this the right thing for namespace
+	 * changes? We think so. See JZ-30001.
+	 */
+	nf_reset_no_generic_ct(skb);
 	nf_reset_trace(skb);
 
 #ifdef CONFIG_NET_SWITCHDEV
@@ -5725,6 +5976,28 @@
 }
 EXPORT_SYMBOL(skb_vlan_untag);
 
+#ifdef CONFIG_PUMA_LITEPATH
+int skb_register_buf_manager(struct skb_buf_manager_cb *cb)
+{
+	if (bm_cb.is_bm_skb) {
+		pr_err("buffer manager already registered\n");
+		return 1;
+	}
+
+	pr_info("skb buffer manager registered\n");
+	memcpy(&bm_cb, cb, sizeof(bm_cb));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(skb_register_buf_manager);
+
+void skb_unregister_buf_manager(void)
+{
+	pr_info("skb buffer manager unregistered\n");
+	memset(&bm_cb, 0, sizeof(bm_cb));
+}
+EXPORT_SYMBOL_GPL(skb_unregister_buf_manager);
+#endif
+
 int skb_ensure_writable(struct sk_buff *skb, int write_len)
 {
 	if (!pskb_may_pull(skb, write_len))
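/*
 * Usage sketch for the CONFIG_PUMA_LITEPATH hooks above. Only the three
 * callbacks this patch actually invokes (is_bm_skb, bm_free_skb,
 * bm_clear_skb) are shown, and their signatures are inferred from the call
 * sites; the real struct skb_buf_manager_cb may carry more members, and
 * the my_pool_* functions are placeholders.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>

static bool my_pool_owns_skb(struct sk_buff *skb);	/* placeholder */
static void my_pool_free_skb(struct sk_buff *skb);	/* placeholder */
static void my_pool_clear_skb(struct sk_buff *skb);	/* placeholder */

static int __init my_bm_init(void)
{
	struct skb_buf_manager_cb cb = {
		.is_bm_skb    = my_pool_owns_skb,	/* consulted on every head free */
		.bm_free_skb  = my_pool_free_skb,	/* takes over in skb_free_head() */
		.bm_clear_skb = my_pool_clear_skb,	/* head replaced by pskb_expand_head() */
	};

	/* Nonzero means another buffer manager is already registered. */
	return skb_register_buf_manager(&cb);
}

static void __exit my_bm_exit(void)
{
	skb_unregister_buf_manager();
}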