--- zzzz-none-000/linux-4.19.183/net/core/skbuff.c	2021-03-24 10:07:39.000000000 +0000
+++ bcm63-7530ax-756/linux-4.19.183/net/core/skbuff.c	2023-06-28 08:54:21.000000000 +0000
@@ -76,12 +76,33 @@
 #include
 #include
 #include
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+#include
+#endif
+
+#if defined(CONFIG_SLUB_AVM_ALLOC_LIST)
+#include
+#include
+#include
+#include
+#include
+#ifdef CONFIG_AVM_PA
+#include
+#endif
+#include
+#endif
+
+#include
 
 struct kmem_cache *skbuff_head_cache __ro_after_init;
 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
 EXPORT_SYMBOL(sysctl_max_skb_frags);
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+#include
+#endif /* CONFIG_BCM_KF_NBUFF */
+
 /**
  * skb_panic - private function for out-of-line support
  * @skb: buffer
@@ -212,12 +233,21 @@
     size = SKB_WITH_OVERHEAD(ksize(data));
     prefetchw(data + size);
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+    /*
+     * Clearing all fields -- fields that were not cleared before
+     * were moved to earlier locations in the structure, so just
+     * zeroing them out is OK, since we overwrite them shortly:
+     */
+    memset(skb, 0, offsetof(struct sk_buff, truesize));
+#else
     /*
      * Only clear those fields we need to clear, not those that we will
      * actually initialise below. Hence, don't put any more fields after
      * the tail pointer in struct sk_buff!
      */
     memset(skb, 0, offsetof(struct sk_buff, tail));
+#endif
     /* Account for allocated memory : skb + skb->head */
     skb->truesize = SKB_TRUESIZE(size);
     skb->pfmemalloc = pfmemalloc;
@@ -232,6 +262,9 @@
     /* make sure we initialize shinfo sequentially */
     shinfo = skb_shinfo(skb);
     memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+#if defined(CONFIG_BCM_KF_NBUFF)
+    shinfo->dirty_p = NULL;
+#endif
     atomic_set(&shinfo->dataref, 1);
 
     if (flags & SKB_ALLOC_FCLONE) {
@@ -245,6 +278,9 @@
         fclones->skb2.fclone = SKB_FCLONE_CLONE;
     }
 out:
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+    avm_simple_profiling_skb((unsigned int)skb->data, skb);
+#endif
     return skb;
 nodata:
     kmem_cache_free(cache, skb);
@@ -545,7 +581,11 @@
     skb_drop_list(&skb_shinfo(skb)->frag_list);
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void skb_clone_fraglist(struct sk_buff *skb)
+#else
+void skb_clone_fraglist(struct sk_buff *skb)
+#endif
 {
     struct sk_buff *list;
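The CONFIG_BCM_KF_NBUFF branch of __alloc_skb() above widens the initial memset() from offsetof(struct sk_buff, tail) to offsetof(struct sk_buff, truesize); per the comment, this only works because the vendor fields it relies on were moved in front of truesize in struct sk_buff. A minimal, self-contained sketch of the offsetof()-based zeroing idiom (using a hypothetical struct, not the real sk_buff layout):

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    /* Hypothetical layout: everything before 'truesize' may be zeroed wholesale;
     * fields from 'truesize' onwards are initialised explicitly afterwards. */
    struct demo_buf {
        void *recycle_hook;     /* cleared by the memset() below */
        unsigned long flags;    /* cleared by the memset() below */
        unsigned int truesize;  /* set explicitly afterwards */
        unsigned char *data;    /* set explicitly afterwards */
    };

    int main(void)
    {
        struct demo_buf b;

        /* zero only the leading fields, like the patched __alloc_skb() */
        memset(&b, 0, offsetof(struct demo_buf, truesize));
        b.truesize = 256;
        b.data = NULL;

        assert(b.recycle_hook == NULL && b.flags == 0);
        return 0;
    }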
@@ -556,6 +596,15 @@
 static void skb_free_head(struct sk_buff *skb)
 {
     unsigned char *head = skb->head;
+#if defined(CONFIG_BCM_KF_NBUFF)
+    /* If the data buffer came from a pre-allocated pool, recycle it.
+     * Recycling may only be performed when no references exist to it.
+     */
+    if (skb->recycle_hook && (skb->recycle_flags & SKB_DATA_RECYCLE)) {
+        (*skb->recycle_hook)(skb, skb->recycle_context, SKB_DATA_RECYCLE);
+        skb->recycle_flags &= SKB_DATA_NO_RECYCLE; /* mask out */
+        return;
+    }
+#endif
 
     if (skb->head_frag)
         skb_free_frag(head);
@@ -590,6 +639,18 @@
 {
     struct sk_buff_fclones *fclones;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    blog_free(skb, blog_free_reason_kfree);
+#endif
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+    /* If the skb came from a preallocated pool, pass it to the recycler hook */
+    if (skb->recycle_hook && (skb->recycle_flags & SKB_RECYCLE)) {
+        (*skb->recycle_hook)(skb, skb->recycle_context, SKB_RECYCLE);
+        return;
+    }
+#endif /* CONFIG_BCM_KF_NBUFF */
+
     switch (skb->fclone) {
     case SKB_FCLONE_UNAVAILABLE:
         kmem_cache_free(skbuff_head_cache, skb);
@@ -627,6 +688,9 @@
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
     nf_conntrack_put(skb_nfct(skb));
 #endif
+#if defined(CONFIG_AVM_PA_GENERIC_CT)
+    generic_ct_put(SKB_GENERIC_CT(skb));
+#endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
     nf_bridge_put(skb->nf_bridge);
 #endif
@@ -651,6 +715,15 @@
 
 void __kfree_skb(struct sk_buff *skb)
 {
+#if defined(CONFIG_BCM_KF_NBUFF)
+    if (skb->recycle_hook && (skb->recycle_flags & SKB_RECYCLE_NOFREE)) {
+        (*skb->recycle_hook)(skb, skb->recycle_context, SKB_RECYCLE_NOFREE);
+        return;
+    }
+#endif
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+    avm_simple_profiling_skb((unsigned int)skb->data, skb);
+#endif
     skb_release_all(skb);
     kfree_skbmem(skb);
 }
@@ -808,6 +881,9 @@
     /* We do not copy old->sk */
     new->dev = old->dev;
     memcpy(new->cb, old->cb, sizeof(old->cb));
+#if IS_ENABLED(CONFIG_AVM_NET_SKB_INPUT_DEV)
+    new->input_dev = old->input_dev;
+#endif
     skb_dst_copy(new, old);
 #ifdef CONFIG_XFRM
     new->sp = secpath_get(old->sp);
@@ -825,10 +901,14 @@
     CHECK_SKB_FIELD(protocol);
     CHECK_SKB_FIELD(csum);
     CHECK_SKB_FIELD(hash);
+#if defined(CONFIG_BCM_KF_NBUFF)
+#else
     CHECK_SKB_FIELD(priority);
+#endif
     CHECK_SKB_FIELD(skb_iif);
     CHECK_SKB_FIELD(vlan_proto);
     CHECK_SKB_FIELD(vlan_tci);
+    new->fritz_os_mark = old->fritz_os_mark;
     CHECK_SKB_FIELD(transport_header);
     CHECK_SKB_FIELD(network_header);
     CHECK_SKB_FIELD(mac_header);
@@ -836,7 +916,11 @@
     CHECK_SKB_FIELD(inner_transport_header);
     CHECK_SKB_FIELD(inner_network_header);
     CHECK_SKB_FIELD(inner_mac_header);
+#if defined(CONFIG_BCM_KF_NBUFF)
+#else
     CHECK_SKB_FIELD(mark);
+#endif
+
 #ifdef CONFIG_NETWORK_SECMARK
     CHECK_SKB_FIELD(secmark);
 #endif
@@ -849,6 +933,9 @@
 #ifdef CONFIG_NET_SCHED
     CHECK_SKB_FIELD(tc_index);
 #endif
+#if defined(CONFIG_BCM_KF_SKB_EXT)
+    bcm_skbuff_copy_skb_header(new, old);
+#endif
 
 }
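In the recycle path above, a buffer owned by a driver pool is never handed to kfree(): the hook stored in skb->recycle_hook is invoked with the skb, an opaque skb->recycle_context, and a flag that says what is being returned (SKB_DATA_RECYCLE for the head data only, SKB_RECYCLE / SKB_RECYCLE_NOFREE for the skb itself). A sketch of what a driver-side callback could look like, assuming the calling convention visible above; the pool type and its bookkeeping are purely illustrative, and the real RecycleFuncP typedef lives in the Broadcom nbuff headers, not in this patch:

    /* Hypothetical driver-owned pool; sizes and fields are illustrative only. */
    struct my_pool {
        void *free_data[64];
        struct sk_buff *free_skb[64];
        unsigned int n_data, n_skb;
    };

    /* Matches the call sites in skb_free_head()/kfree_skbmem()/__kfree_skb():
     * (*skb->recycle_hook)(skb, skb->recycle_context, <flag>); */
    static void my_pool_recycle(struct sk_buff *skb, unsigned long context,
                                unsigned int flags)
    {
        struct my_pool *pool = (struct my_pool *)context;

        if (flags & SKB_DATA_RECYCLE) {
            /* Only the head data buffer is returned; the skb lives on. */
            if (pool->n_data < ARRAY_SIZE(pool->free_data))
                pool->free_data[pool->n_data++] = skb->head;
            return;
        }
        /* SKB_RECYCLE / SKB_RECYCLE_NOFREE: the skb header goes back to the pool. */
        if (pool->n_skb < ARRAY_SIZE(pool->free_skb))
            pool->free_skb[pool->n_skb++] = skb;
    }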
@@ -863,6 +950,13 @@
     n->next = n->prev = NULL;
     n->sk = NULL;
     __copy_skb_header(n, skb);
+#ifdef CONFIG_AVM_PA
+    /* Not to be copied by __copy_skb_header(). __copy_skb_header() is used
+     * during segmentation. Copies created by that function may not inherit
+     * the same pkt_info because avm_pa cannot tell them apart.
+     */
+    memcpy(AVM_PKT_INFO(n), AVM_PKT_INFO(skb), sizeof(struct avm_pa_pkt_info));
+#endif
 
     C(len);
     C(data_len);
@@ -879,6 +973,9 @@
     C(head_frag);
     C(data);
     C(truesize);
+#if defined(CONFIG_BCM_KF_SKB_EXT)
+    bcm_skbuff_skb_clone(n, skb);
+#endif
     refcount_set(&n->users, 1);
 
     atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -900,8 +997,45 @@
  */
 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 {
+#if defined(CONFIG_BCM_KF_NBUFF)
+    struct sk_buff *skb;
+    unsigned int recycle_flags;
+    unsigned long recycle_context;
+    RecycleFuncP recycle_hook;
+
+    skb_release_all(dst);
+
+    /* Need to retain the recycle flags, context & hook of dst to free it
+     * into the proper pool. */
+    recycle_flags = dst->recycle_flags & SKB_RECYCLE;
+    recycle_hook = dst->recycle_hook;
+    recycle_context = dst->recycle_context;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    if (dst->blog_p)
+        blog_skip(dst, blog_skip_reason_skb_morph);
+#endif
+
+    if (unlikely((src->recycle_flags & SKB_DATA_RECYCLE) &&
                  ((recycle_hook != src->recycle_hook) ||
                   (recycle_context != src->recycle_context)))) {
+        /* Free skb->head of src and reallocate it from the kernel.
+         * If pskb_expand_head() failed, the error would go unhandled,
+         * so BUG_ON() here. */
+        BUG_ON(pskb_expand_head(src, 0, 0, GFP_ATOMIC));
+    }
+
+    skb = __skb_clone(dst, src);
+
+    dst->recycle_flags |= recycle_flags;
+    dst->recycle_hook = recycle_hook;
+    dst->recycle_context = recycle_context;
+    return skb;
+#else /* CONFIG_BCM_KF_NBUFF */
     skb_release_all(dst);
     return __skb_clone(dst, src);
+#endif /* CONFIG_BCM_KF_NBUFF */
 }
 EXPORT_SYMBOL_GPL(skb_morph);
@@ -1297,6 +1431,9 @@
         n->fclone = SKB_FCLONE_UNAVAILABLE;
     }
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    n->blog_p = NULL;
+#endif
     return __skb_clone(n, skb);
 }
 EXPORT_SYMBOL(skb_clone);
@@ -1320,6 +1457,13 @@
 void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
 {
     __copy_skb_header(new, old);
+#ifdef CONFIG_AVM_PA
+    /* Not to be copied by __copy_skb_header(). __copy_skb_header() is used
+     * during segmentation. Copies created by that function may not inherit
+     * the same pkt_info because avm_pa cannot tell them apart.
+     */
+    memcpy(AVM_PKT_INFO(new), AVM_PKT_INFO(old), sizeof(struct avm_pa_pkt_info));
+#endif
 
     skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
     skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
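skb_morph() above saves dst's recycle metadata across skb_release_all()/__skb_clone() so that the morphed skb is still returned to the pool it was allocated from, and it forces src's head data off its recycle pool (via pskb_expand_head()) whenever the two skbs do not belong to the same pool. The mismatch test, rewritten as a standalone predicate for clarity (hypothetical helper name; the patch keeps the check inline):

    /* Hypothetical helper: true when src's head data sits in a recycle pool
     * that dst does not own, i.e. the data must be re-allocated before dst
     * may take ownership of it. */
    static bool recycle_pool_mismatch(const struct sk_buff *dst,
                                      const struct sk_buff *src)
    {
        if (!(src->recycle_flags & SKB_DATA_RECYCLE))
            return false;   /* src's data is not pool-backed */

        return dst->recycle_hook != src->recycle_hook ||
               dst->recycle_context != src->recycle_context;
    }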
@@ -1509,7 +1653,21 @@
     skb->head     = data;
     skb->head_frag = 0;
+#if (defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION))
+    {
+        int clone_fc_len = 0;
+
+        if (skb->bcm_ext.clone_fc_head)
+            clone_fc_len = skb->data - skb->bcm_ext.clone_fc_head;
+
+        skb->data += off;
+
+        if (skb->bcm_ext.clone_fc_head)
+            skb->bcm_ext.clone_fc_head = skb->data - clone_fc_len;
+    }
+#else
     skb->data    += off;
+#endif
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
     skb->end      = size;
     off           = nhead;
@@ -1521,6 +1679,20 @@
     skb->cloned   = 0;
     skb->hdr_len  = 0;
     skb->nohdr    = 0;
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+    /* Clear the data-recycle flag, as this buffer was allocated via kmalloc().
+     * Note that skb_release_data()/skb_free_head() might already have cleared
+     * it, but that is not guaranteed: if the buffer is cloned, skb_release_data()
+     * does not clear it. The original data buffer will be freed when the cloned
+     * skb is freed. */
+    skb->recycle_flags &= SKB_DATA_NO_RECYCLE;
+    /* The data buffer of this skb is no longer pre-allocated, even though the
+     * skb itself is; dirty_p pertains to the previous buffer, so clear it. */
+    skb_shinfo(skb)->dirty_p = NULL;
+#endif
+
     atomic_set(&skb_shinfo(skb)->dataref, 1);
 
     skb_metadata_clear(skb);
@@ -3996,6 +4168,9 @@
                           0,
                           SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                           NULL);
+#if defined(CONFIG_AVM_PA_GENERIC_CT)
+    generic_ct_init();
+#endif
 }
 
 static int
@@ -4960,7 +5135,10 @@
     skb->ignore_df = 0;
     skb_dst_drop(skb);
     secpath_reset(skb);
-    nf_reset(skb);
+    /* TMA/MQU 20170411: Is this the right thing for namespace
+     * changes? We think so. See JZ-30001.
+     */
+    nf_reset_no_generic_ct(skb);
     nf_reset_trace(skb);
 
 #ifdef CONFIG_NET_SWITCHDEV
@@ -4974,6 +5152,27 @@
     ipvs_reset(skb);
     skb->mark = 0;
     skb->tstamp = 0;
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+    /*
+     * When using containers in some network configurations, packets are
+     * treated as L2 flows while learning. Local acceleration is not
+     * supported for L2 flows, so skip it.
+     *
+     * Note: we try to add the containers' MAC addresses to the host MAC
+     * table (using a netdev_notifier) so that flows terminating in
+     * containers are treated as L3 flows, but this check helps to catch
+     * any unknown configurations.
+     */
+    /* MSH 2022-02-01: WireGuard re-uses skbs and calls skb_scrub_packet()
+     * JZ-99855 to clean its metadata, therefore blog entries must be
+     * removed here as well. Otherwise corrupt sessions will be created
+     * by flow cache, e.g. ptm0->ptm0.
+     */
+    if (skb->blog_p /* && skb->blog_p->l2_mode */)
+        blog_skip(skb, blog_skip_reason_scrub_pkt); /* no local accel in L2 mode */
+#endif
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
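The switch from nf_reset() to nf_reset_no_generic_ct() keeps the avm_pa generic-conntrack reference attached while still dropping the netfilter state when a packet is scrubbed. The helper itself is not part of this file; a plausible shape for it, assuming it simply mirrors the 4.19 nf_reset() minus the generic-ct handling (a sketch, not the actual AVM definition):

    /* Sketch only: drop nf conntrack and bridge-netfilter state, but do NOT
     * call generic_ct_put()/clear SKB_GENERIC_CT(), unlike a full reset. */
    static inline void nf_reset_no_generic_ct_sketch(struct sk_buff *skb)
    {
    #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb_nfct(skb));
        skb->_nfct = 0;
    #endif
    #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(skb->nf_bridge);
        skb->nf_bridge = NULL;
    #endif
    }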
@@ -5639,3 +5838,402 @@
  */
     skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 }
+
+#if defined(CONFIG_SLUB_AVM_ALLOC_LIST)
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+struct _skb_id_countlist {
+    unsigned long id;
+    unsigned long count;
+};
+
+enum _format_type {
+    is_proto = 0,
+    is_symbol,
+    is_skblen,
+    is_netdev,
+    is_slab,
+};
+
+#define SKB_COUNT_ID_ENTRIES 32
+struct _skb_class {
+    struct _skb_id_countlist countlist[SKB_COUNT_ID_ENTRIES];
+    void (*skb_class_cb)(struct sk_buff *skb, struct _skb_id_countlist *countlist);
+    const char *name;
+    enum _format_type type;
+};
+
+static char devname[SKB_COUNT_ID_ENTRIES][IFNAMSIZ];
+
+/**
+ * return id
+ * careful access/analyze/copy of dev->name (dev may be invalid)!
+ * id == 0: devname is zero
+ * id < 0:  invalid pointer/device (-EINVAL), entry table full (-ENOMEM)
+ */
+static int devname_to_id(struct net_device *dev)
+{
+    char devname_tmp[IFNAMSIZ];
+    unsigned int i;
+    const char *p = &dev->name[0];
+
+    if (virt_addr_valid(p) == 0)
+        return dev ? -EINVAL : 0;
+    if (virt_addr_valid(p + IFNAMSIZ - 1) == 0)
+        return -EINVAL;
+
+    if (!PageSlab(virt_to_head_page(p)))
+        /* support only kmalloc-allocated devices, else some cases trigger a misinterpreted DBE */
+        return -EINVAL;
+
+    for (i = 0; i < IFNAMSIZ; i++) {
+        devname_tmp[i] = *p;
+        if (*p == 0)
+            break;
+        if (isascii(*p++))
+            continue;
+        break;
+    }
+    if (*p != 0) {
+        return -EINVAL;
+    }
+    for (i = 0; i < ARRAY_SIZE(devname); i++) {
+        if (devname[i][0] == 0)
+            break;
+        if (strncmp(devname[i], devname_tmp, IFNAMSIZ) == 0)
+            /* entry found */
+            return i + 1;
+    }
+    if (i < ARRAY_SIZE(devname)) {
+        /* append */
+        strncpy(devname[i], devname_tmp, IFNAMSIZ);
+        return i + 1;
+    }
+    return -ENOMEM;
+}
+
+static int count_skb_id(unsigned long id, struct _skb_id_countlist *countlist)
+{
+    unsigned int entry;
+
+    for (entry = 0; entry < SKB_COUNT_ID_ENTRIES; entry++) {
+        if (countlist[entry].id == id ||
+            countlist[entry].count == 0) {
+            countlist[entry].id = id;
+            countlist[entry].count++;
+            return 0;
+        }
+    }
+    return -ENOMEM;
+}
+
+/**
+ * @brief count all skbs with the same protocol
+ */
+static void skb_class_list_cb_protocol(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    count_skb_id(skb->protocol, countlist);
+}
+
+/**
+ * @brief count all skbs with the same netdev
+ * set a reference for the netdevice because we have to access the name later
+ */
+static void skb_class_list_cb_netdev(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    unsigned long id = devname_to_id(skb->dev);
+
+    count_skb_id(id, countlist);
+}
+
+/**
+ * @brief count all skbs with the same destructor
+ */
+static void skb_class_list_cb_destructor(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    count_skb_id((unsigned long)skb->destructor, countlist);
+}
+
+/**
+ * @brief count all skbs with the same vlan_proto
+ */
+static void skb_class_list_cb_vlan_proto(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    count_skb_id(skb->vlan_proto, countlist);
+}
+
+/**
+ * @brief count all skbs with a valid sk or sk == NULL
+ * carefully try to use the slab cache-pool name pointer as id if sk lives in a slab
+ */
+static void skb_class_list_cb_socket(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    struct kmem_cache *s;
+    struct page *page;
+    void *sk = READ_ONCE(skb->sk);
+
+    if (sk == NULL) {
+        count_skb_id(0, countlist);
+        return;
+    }
+    if (!virt_addr_valid(sk)) {
+        count_skb_id(-EINVAL, countlist);
+        return;
+    }
+    page = virt_to_head_page(sk);
+    if (virt_addr_valid(page) && PageSlab(page)) {
+        s = page->slab_cache;
+
+        if (virt_addr_valid(s) && virt_addr_valid(s->name)) {
+            count_skb_id((unsigned long)s->name, countlist);
+            return;
+        }
+    }
+    count_skb_id(-EINVAL, countlist);
+}
+
+/**
+ * @brief count all skbs with the same skb_iif
+ */
+static void skb_class_list_cb_iif(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    count_skb_id(skb->skb_iif, countlist);
+}
+
+#define PACKET_LEN_AREA   (ETH_FRAME_LEN + ETH_FCS_LEN)
+#define PACKET_LEN_OFFSET SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+/**
+ * @brief count all skb lengths (bucketed by area)
+ */
+static void skb_class_list_cb_len(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    int len = skb->len;
+
+    if (len >= PACKET_LEN_OFFSET) {
+        len -= PACKET_LEN_OFFSET;
+        count_skb_id((len / PACKET_LEN_AREA) + 1, countlist);
+        return;
+    }
+    count_skb_id(0, countlist);
+}
+
+#ifdef CONFIG_AVM_PA
+/**
+ * @brief count all skbs with the same pktinfo.ingress_pid_handle
+ */
+static void skb_class_list_cb_avm_pa_ingress_pid_handle(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    count_skb_id(AVM_PKT_INFO(skb)->ingress_pid_handle, countlist);
+}
+
+/**
+ * @brief count all skbs with the same pktinfo.egress_pid_handle
+ */
+static void skb_class_list_cb_avm_pa_egress_pid_handle(struct sk_buff *skb, struct _skb_id_countlist *countlist)
+{
+    count_skb_id(AVM_PKT_INFO(skb)->egress_pid_handle, countlist);
+}
+#endif
+
+static struct _skb_class skb_class_list[] = {
+    { .skb_class_cb = skb_class_list_cb_protocol,   .name = "protocol",   .type = is_proto },
+    { .skb_class_cb = skb_class_list_cb_vlan_proto, .name = "vlan_proto", .type = is_proto },
+    { .skb_class_cb = skb_class_list_cb_netdev,     .name = "netdev",     .type = is_netdev },
+    { .skb_class_cb = skb_class_list_cb_socket,     .name = "socket",     .type = is_slab },
+    { .skb_class_cb = skb_class_list_cb_iif,        .name = "skb_iif",    .type = is_proto },
+    { .skb_class_cb = skb_class_list_cb_len,        .name = "len",        .type = is_skblen },
+#ifdef CONFIG_AVM_PA
+    { .skb_class_cb = skb_class_list_cb_avm_pa_ingress_pid_handle, .name = "avm_pa_ingress_pid", .type = is_proto },
+    { .skb_class_cb = skb_class_list_cb_avm_pa_egress_pid_handle,  .name = "avm_pa_egress_pid ", .type = is_proto },
+#endif
+    { .skb_class_cb = skb_class_list_cb_destructor, .name = "destructor", .type = is_symbol },
+};
+
+atomic_t busy_skb_pending_statistic;
+
+/**
+ * @brief clean data and put refs for netdevices
+ */
+static void skb_class_list_clean(void)
+{
+    unsigned int i;
+
+    for (i = 0; i < ARRAY_SIZE(skb_class_list); i++) {
+        memset(&skb_class_list[i].countlist, 0, sizeof(skb_class_list[i].countlist));
+    }
+
+    memset(&devname, 0, sizeof(devname));
+}
+
+/**
+ * @brief callback invoked for every pending skb
+ */
+static int sk_buff_pointer_cb(void *ref, void *p)
+{
+    unsigned long *sum_skbs = (unsigned long *)ref;
+    unsigned int i;
+    struct sk_buff *skb = (struct sk_buff *)p;
+
+    for (i = 0; i < ARRAY_SIZE(skb_class_list); i++) {
+        struct _skb_class *pscl = &skb_class_list[i];
+
+        pscl->skb_class_cb(skb, pscl->countlist);
+    }
+    *sum_skbs += 1;
+#if 0
+    if (skb->protocol)
+        pr_err("%s: (ref=%p) %p: ts=%llu netdev:%s destructor=%pS protocol=%x vlan_proto=%x mac=%pM avm_pa: pid=%x hsession=%x\n",
+               __func__,
+               ref,
+               skb,
+               skb_get_ktime(skb).tv64,
+               skb->dev ? netdev_name(skb->dev) : "?",
+               skb->destructor,
+               skb->protocol,
+               skb->vlan_proto,
+               skb_mac_header(skb),
+               AVM_PKT_INFO(skb)->ingress_pid_handle,
+               AVM_PKT_INFO(skb)->session_handle);
+#endif
+    return 0;
+}
+
+/**
+ * @brief show pending-skb statistics on OOM or via /proc/avm/skb_pending
+ */
+static void display_skb_class_counts(struct seq_file *seq, unsigned long sum_skbs,
+                                     struct kmem_cache *s, unsigned int threshcount)
+{
+    unsigned int i, entry;
+    unsigned long len_idx, len_start;
+    char txt[64];
+
+    if (threshcount)
+        snprintf(txt, sizeof(txt), " - show all counters >= %u", threshcount);
+    else
+        txt[0] = 0;
+
+    sseq_printf(seq, "%s: pending sk_buffs: %lu (%5lu KiB)%s\n",
+                s->name, sum_skbs, (sum_skbs * s->object_size) >> 10, txt);
+
+    for (i = 0; i < ARRAY_SIZE(skb_class_list); i++) {
+        struct _skb_class *pscl = &skb_class_list[i];
+
+        for (entry = 0; entry < SKB_COUNT_ID_ENTRIES; entry++) {
+            if (pscl->countlist[entry].count == 0)
+                break;
+            if (pscl->countlist[entry].count < threshcount)
+                continue;
+            switch (pscl->type) {
+            case is_netdev:
+                sseq_printf(seq, "%s: %-18s: %6lu\n", pscl->name,
+                            pscl->countlist[entry].id == 0 ? "no-dev" :
+                            pscl->countlist[entry].id <= ARRAY_SIZE(devname) ? devname[pscl->countlist[entry].id - 1] :
"devlist-full" : + pscl->countlist[entry].id == (unsigned long)-EINVAL ? "dev-freed" : "dev-?", + pscl->countlist[entry].count); + break; + case is_slab: + sseq_printf(seq, "%s: %-18s: %6lu\n", pscl->name, + (pscl->countlist[entry].id == 0 ? "(null)" : + virt_addr_valid(pscl->countlist[entry].id) ? (char *)pscl->countlist[entry].id : + "unknown"), + pscl->countlist[entry].count); + break; + case is_symbol: + sseq_printf(seq, "%s: %-48pS: %6lu\n", pscl->name, + (void *)pscl->countlist[entry].id, + pscl->countlist[entry].count); + break; + case is_proto: + sseq_printf(seq, "%s: 0x%04lx: %6lu\n", pscl->name, + pscl->countlist[entry].id, + pscl->countlist[entry].count); + break; + case is_skblen: + len_idx = pscl->countlist[entry].id; + if (len_idx == 0) { + sseq_printf(seq, "%s: %6u -%6u bytes: %6lu\n", pscl->name, + 0, PACKET_LEN_OFFSET - 1, + pscl->countlist[entry].count); + break; + } + len_idx--; + len_start = PACKET_LEN_OFFSET + (len_idx * PACKET_LEN_AREA); + sseq_printf(seq, "%s: %6lu -%6lu bytes: %6lu\n", pscl->name, + len_start, len_start + PACKET_LEN_AREA - 1, + pscl->countlist[entry].count); + break; + } + } + if (pscl->countlist[SKB_COUNT_ID_ENTRIES - 1].count) + sseq_printf(seq, "... (not all %s counted)\n", + pscl->type == is_netdev ? "netdevs" : + pscl->type == is_symbol ? "symbols" : + pscl->type == is_slab ? "sockets" : + pscl->type == is_proto ? "protocols" : "id"); + } +} + +#define SK_BUFF_THRESH_COUNT 50000 +/** + */ +static void avm_proc_skb_pending_statistic(struct seq_file *seq, void *priv) +{ + struct kmem_cache *cachetab[] = {skbuff_head_cache, skbuff_fclone_cache}; + unsigned int i, active_objs; + unsigned int *ptreshsize = priv; + + if (atomic_add_return(1, &busy_skb_pending_statistic) != 1) { + return; + } + for (i = 0; i < ARRAY_SIZE(cachetab); i++) { + unsigned long sum_skbs = 0; + struct kmem_cache *s = cachetab[i]; + + active_objs = kmem_cache_active_objects(s); + if (active_objs >= SK_BUFF_THRESH_COUNT || seq) { + kmem_cache_list_all_objects(s, &sum_skbs, sk_buff_pointer_cb); + if (!seq) + pr_err("mem-error: suspiciously much %s sk_buff's %u\n", + s->name, active_objs); + if (sum_skbs) + display_skb_class_counts(seq, sum_skbs, s, + ptreshsize ? *ptreshsize : 0); + skb_class_list_clean(); + } + } + atomic_set(&busy_skb_pending_statistic, 0); +} + +/** + */ +static int skb_oom_notify(struct notifier_block *block, + unsigned long event, void *_data) +{ + struct seq_file *seq = _data; + unsigned int threshcount = SK_BUFF_THRESH_COUNT / SKB_COUNT_ID_ENTRIES; + + avm_proc_skb_pending_statistic(seq, &threshcount); + return NOTIFY_OK; +} + +static struct notifier_block skb_oom_nb = { + .notifier_call = skb_oom_notify, + .priority = 1, +}; + +/** + */ +static __init int init_skb_oom(void) +{ + add_simple_proc_file("avm/skb_pending", NULL, + avm_proc_skb_pending_statistic, NULL); + + avm_oom_info_chain_register(&skb_oom_nb); + return 0; +} +late_initcall(init_skb_oom); +#endif