--- zzzz-none-000/linux-4.1.52/include/linux/skbuff.h	2018-05-28 02:26:45.000000000 +0000
+++ bcm63-7530ax-731/linux-4.1.52/include/linux/skbuff.h	2022-03-02 11:37:13.000000000 +0000
@@ -30,6 +30,26 @@
 #include
 #include
 #include
+
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG)
+#include
+#endif
+
+#if defined(CONFIG_BCM_KF_BPM_BUF_TRACKING)
+#if defined(CONFIG_BCM_BPM_BUF_TRACKING)
+#include
+#define KERN_GBPM_TRACK_BUF(buf, value, info)	GBPM_TRACK_BUF(buf, GBPM_DRV_KERN, value, info)
+#define KERN_GBPM_TRACK_SKB(skb, value, info)	GBPM_TRACK_SKB(skb, GBPM_DRV_KERN, value, info)
+#define KERN_GBPM_TRACK_FKB(fkb, value, info)	GBPM_TRACK_FKB(fkb, GBPM_DRV_KERN, value, info)
+#else
+#define KERN_GBPM_TRACK_BUF(buf, value, info)	do {} while (0)
+#define KERN_GBPM_TRACK_SKB(skb, value, info)	do {} while (0)
+#define KERN_GBPM_TRACK_FKB(fkb, value, info)	do {} while (0)
+#define GBPM_INC_REF(buf)			do {} while (0)
+#define GBPM_DEC_REF(buf)			do {} while (0)
+#endif
+#endif
+
 #include
 #include
 #include
@@ -157,12 +177,95 @@
 struct iov_iter;
 struct napi_struct;
 
+#if defined(CONFIG_BCM_KF_MAP)
+#define MAP_FORWARD_NONE	0
+#define MAP_FORWARD_MODE1	1
+#define MAP_FORWARD_MODE2	2
+#define MAP_FORWARD_MODE3	3	/* MAP-E pre-fragmentation */
+#endif
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+/* This is required even if blog is not defined, so it falls
+ * under the nbuff category.
+ */
+struct blog_t;		/* defined(CONFIG_BLOG) */
+
+#ifndef NULL_STMT
+#define NULL_STMT do { /* NULL BODY */ } while (0)
+#endif
+
+typedef void (*RecycleFuncP)(void *nbuff_p, unsigned long context, uint32_t flags);
+#define SKB_DATA_RECYCLE	(1 << 0)
+#define SKB_DATA_NO_RECYCLE	(~SKB_DATA_RECYCLE)	/* to mask out */
+
+#define SKB_RECYCLE		(1 << 1)
+#define SKB_NO_RECYCLE		(~SKB_RECYCLE)		/* to mask out */
+
+#define SKB_RECYCLE_NOFREE	(1 << 2)	/* DO NOT USE */
+#define SKB_RECYCLE_FPM_DATA	(1 << 3)	/* Data buffer from Runner FPM pool */
+#define SKB_RNR_FLOOD		(1 << 4)	/* Data buffer flooded by Runner to flooding-capable ports */
+
+/* Indicates whether an sk_buff or a data buffer is in BPM pristine state */
+#define SKB_BPM_PRISTINE	(1 << 5)
+
+/* UDP speed-test flags */
+#define SKB_RNR_UDPSPDT_BASIC	(1 << 6)
+#define SKB_RNR_UDPSPDT_IPERF3	(1 << 7)
+
+#define SKB_RNR_FLAGS (SKB_RNR_FLOOD | SKB_RNR_UDPSPDT_BASIC | SKB_RNR_UDPSPDT_IPERF3)
+
+#define SKB_BPM_TAINTED(skb)						\
+({									\
+	((struct sk_buff *)skb)->recycle_flags &= ~SKB_BPM_PRISTINE;	\
+	(skb_shinfo(skb))->dirty_p = NULL;				\
+})
+
+#define SKB_DATA_PRISTINE(skb)						\
+({									\
+	(skb_shinfo(skb))->dirty_p = ((struct sk_buff *)skb)->head;	\
+})
+
+struct fkbuff;
+
+extern void skb_frag_xmit4(struct sk_buff *origskb, struct net_device *txdev,
+			   uint32_t is_pppoe, uint32_t minMtu, void *ip_p);
+extern void skb_frag_xmit6(struct sk_buff *origskb, struct net_device *txdev,
+			   uint32_t is_pppoe, uint32_t minMtu, void *ip_p);
+extern struct sk_buff *skb_xlate(struct fkbuff *fkb_p);
+extern struct sk_buff *skb_xlate_dp(struct fkbuff *fkb_p, uint8_t *dirty_p);
+extern int skb_avail_headroom(const struct sk_buff *skb);
+extern void skb_bpm_tainted(struct sk_buff *skb);
+
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+#define SKB_VLAN_MAX_TAGS	4
+#endif
+
+#define CONFIG_SKBSHINFO_HAS_DIRTYP	1
+#endif /* CONFIG_BCM_KF_NBUFF */
+
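The recycle machinery above replaces the normal free path for pool-backed buffers: when such an skb is freed, its recycle_hook (a field added to struct sk_buff further down in this patch) is invoked with flags saying what to reclaim. A minimal sketch of such a hook, assuming a driver-private pool; my_pool_put_data() and my_pool_put_skb() are hypothetical helpers, not part of this patch:

/* Hypothetical recycle hook for a driver that preallocates both
 * sk_buff headers and data buffers from its own pool.
 */
static void my_recycle_hook(void *nbuff_p, unsigned long context, uint32_t flags)
{
	struct sk_buff *skb = (struct sk_buff *)nbuff_p;

	if (flags & SKB_DATA_RECYCLE)		/* return the data buffer */
		my_pool_put_data((void *)context, skb->head);

	if (flags & SKB_RECYCLE)		/* return the sk_buff header */
		my_pool_put_skb((void *)context, skb);
}
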
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 struct nf_conntrack {
 	atomic_t use;
 };
 #endif
 
+#if defined(CONFIG_BCM_KF_WL)
+struct nf_bridge_info {
+	atomic_t		use;
+	enum {
+		BRNF_PROTO_UNCHANGED,
+		BRNF_PROTO_8021Q,
+		BRNF_PROTO_PPPOE
+	} orig_proto;
+	bool			pkt_otherhost;
+	unsigned int		mask;
+	struct net_device	*physindev;
+	struct net_device	*physoutdev;
+	char			neigh_header[8];
+	__be32			ipv4_daddr;
+};
+#else
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 struct nf_bridge_info {
 	atomic_t		use;
@@ -179,6 +282,7 @@
 	__be32			ipv4_daddr;
 };
 #endif
+#endif
 
 struct sk_buff_head {
 	/* These two members must be first. */
@@ -314,6 +418,11 @@
  * the end of the header data, ie. at skb->end.
  */
 struct skb_shared_info {
+#if defined(CONFIG_BCM_KF_NBUFF)
+	/* To preserve compatibility with binary-only modules, do not change
+	 * the position of this field relative to the start of the structure. */
+	__u8		*dirty_p;
+#endif /* defined(CONFIG_BCM_KF_NBUFF) */
 	unsigned char	nr_frags;
 	__u8		tx_flags;
 	unsigned short	gso_size;
@@ -338,6 +447,19 @@
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
 
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+typedef struct bl_buffer_info {
+	unsigned char	*buffer;	/* address of the buffer from BPM */
+	unsigned char	*packet;	/* address of the data */
+	unsigned int	buffer_len;	/* size of the buffer */
+	unsigned int	packet_len;	/* size of the data packet */
+	unsigned int	buffer_number;	/* the buffer location in the BPM */
+	unsigned int	port;		/* the port */
+} bl_skbuff_info;
+#endif /* CONFIG_BCM_RDPA */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
 /* We divide dataref into two halves.  The higher 16 bits hold references
  * to the payload part of skb->data.  The lower 16 bits hold references to
  * the entire skb->data.  A clone of a headerless skb holds the length of
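The dirty_p field added to skb_shared_info appears to let SKB_DATA_PRISTINE/SKB_BPM_TAINTED track how far into a recycled buffer the CPU has written, so only that region needs a cache clean before the buffer goes back to hardware. A minimal sketch of the idea; my_cache_flush_range() is a hypothetical helper:

/* Hypothetical recycle-time cache maintenance: clean only the bytes
 * the CPU may have dirtied (head .. dirty_p) instead of the whole
 * data buffer.
 */
static void my_flush_before_recycle(struct sk_buff *skb)
{
	__u8 *dirty = skb_shinfo(skb)->dirty_p;

	if (dirty)	/* pristine tracking valid: bounded flush */
		my_cache_flush_range(skb->head, dirty);
	else		/* tainted: extent unknown, flush everything */
		my_cache_flush_range(skb->head, skb_end_pointer(skb));
}
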
@@ -448,8 +570,67 @@
 	return delta_us;
 }
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+typedef union wlFlowInf {
+	uint32_t u32;
+	union {
+		union {
+			struct {
+				/* Start - fields shared between ucast and mcast */
+				uint32_t is_ucast:1;
+				/* wl_prio is 4 bits for NIC and 3 bits for DHD.
+				 * The plan is to make NIC 3 bits as well after
+				 * more analysis. */
+				uint32_t wl_prio:4;
+				/* End - fields shared between ucast and mcast */
+				uint32_t nic_reserved1:11;
+				uint32_t wl_chainidx:16;
+			};
+			struct {
+				uint32_t overlayed_field:16;
+				uint32_t ssid_dst:16;	/* for bridged traffic there is no chainidx (0xFE) */
+			};
+		} nic;
+
+		struct {
+			/* Start - fields shared between ucast and mcast */
+			uint32_t is_ucast:1;
+			uint32_t wl_prio:4;
+			/* End - fields shared between ucast and mcast */
+			/* Start - fields shared between DHD ucast and DHD mcast */
+			uint32_t flowring_idx:10;
+			/* End - fields shared between DHD ucast and DHD mcast */
+			uint32_t dhd_reserved:13;
+			uint32_t ssid:4;
+		} dhd;
+	} ucast;
+
+	struct {
+		/* Start - fields shared between ucast and mcast.
+		 * For multicast, WFD does not need to populate flowring_idx;
+		 * it is used internally by the DHD driver. */
+		uint32_t is_ucast:1;
+		uint32_t wl_prio:4;
+		/* End - fields shared between ucast and mcast */
+		/* Start - fields shared between DHD ucast and DHD mcast */
+		uint32_t flowring_idx:10;
+		/* End - fields shared between DHD ucast and DHD mcast */
+		uint32_t mcast_reserved:1;
+		uint32_t ssid_vector:16;
+	} mcast;
+
+	struct {
+		/* Start - fields shared between ucast, mcast and pktfwd */
+		uint32_t is_ucast:1;
+		uint32_t wl_prio:4;		/* packet priority */
+		/* End - fields shared between ucast, mcast and pktfwd */
+		uint32_t pktfwd_reserved:7;
+		uint32_t ssid:4;
+		uint32_t pktfwd_key:16;		/* pktfwd_key_t: 2b domain, 2b incarn, 12b index */
+	} pktfwd;
+} wlFlowInf_t;
+#endif
 
-/** 
+/**
  * struct sk_buff - socket buffer
  * @next: Next buffer in list
  * @prev: Previous buffer in list
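Note that is_ucast and wl_prio occupy the same bit positions in every variant of the union, so a consumer can test is_ucast through any member before picking the matching view. A minimal sketch, assuming the wl field of struct sk_buff introduced below; my_tx_ucast() and my_tx_mcast() are hypothetical:

/* Hypothetical dispatch on the WLAN flow info carried in an skb */
static void my_wl_dispatch(struct sk_buff *skb)
{
	wlFlowInf_t wl = skb->wl;

	if (wl.mcast.is_ucast)	/* same bit in every variant */
		my_tx_ucast(skb, wl.ucast.dhd.flowring_idx, wl.ucast.dhd.ssid);
	else
		my_tx_mcast(skb, wl.mcast.ssid_vector);
}
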
@@ -497,7 +678,7 @@
  * @wifi_acked_valid: wifi_acked was set
  * @wifi_acked: whether frame was acked on wifi or not
  * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
- * @napi_id: id of the NAPI struct this skb came from 
+ * @napi_id: id of the NAPI struct this skb came from
  * @secmark: security marking
  * @mark: Generic packet mark
  * @vlan_proto: vlan encapsulation protocol
@@ -533,28 +714,162 @@
 	};
 	struct sock		*sk;
 	struct net_device	*dev;
+#if defined(CONFIG_BCM_KF_NBUFF)
+	void			*tunl;
+
+	union {
+		/* 3 bytes unused */
+		unsigned int	recycle_and_rnr_flags;
+		unsigned int	recycle_flags;
+	};
+	/*
+	 * Several skb fields have been regrouped here for better data
+	 * locality and cache performance (16-byte cache-line proximity).
+	 * On 32-bit architectures there are 32 bytes of data before this
+	 * comment; on 64-bit architectures, 52 bytes.
+	 */
+
+	/*--- members common to fkbuff: begin here ---*/
+	struct {
+		union {
+			/* see fkb_in_skb_test() */
+			void			*fkbInSkb;
+			struct sk_buff_head	*list;
+		};
+
+		/* defined(CONFIG_BLOG), use blog_ptr() */
+		struct blog_t	*blog_p;
+		unsigned char	*data;
+
+		/* The len in fkb is only 24 bits; the other 8 bits are used
+		 * as internal flags.  When fkbInSkb is used, len is limited
+		 * to 24 bits and bits 31-24 are cleared.  We currently have
+		 * no case where len needs more than 24 bits.
+		 */
+		union {
+			unsigned int	len;
+			/* used for the fkb_in_skb test */
+			__u32		len_word;
+		};
+
+		union {
+			__u32		mark;
+			__u32		dropcount;
+			void		*queue;
+			/* This variation of fkb_mark is declared to ease
+			 * handling of 64-bit vs 32-bit in fcache. */
+			unsigned long	fkb_mark;
+			__u32		fc_ctxt;	/* hybrid flow cache context */
+		};
+
+		union {
+			__u32		priority;
+			wlFlowInf_t	wl;
+		};
+
+		/* Recycle preallocated skb or data */
+		RecycleFuncP	recycle_hook;
+
+		union {
+			unsigned long	recycle_context;
+			struct sk_buff	*next_free;
+			__u32		fpm_num;
+		};
+#ifdef CONFIG_64BIT
+	} ____cacheline_aligned;
+	/*
+	 * The fkbuff block above is purposely made cacheline-aligned on
+	 * 64-bit architectures so that the offset to its contents lands in
+	 * the same cache line.  It is only ____cacheline_aligned for 64 bit
+	 * because on 32 bit we have manually verified that the structure is
+	 * 32-byte aligned; adding ____cacheline_aligned there too would
+	 * waste 64 bytes on a 32-bit architecture with a 64-byte cache line
+	 * (e.g. 63148).
+	 */
+#else
+	};
+#endif
+	/*--- members common to fkbuff: end here ---*/
+
+	struct nf_conntrack	*nfct;		/* CONFIG_NETFILTER */
+	struct sk_buff		*nfct_reasm;	/* CONFIG_NF_CONNTRACK_MODULE */
+
+/*
+ * ------------------------------- CAUTION!!! ---------------------------------
+ * Do NOT add a new field or modify any existing field before this line
+ * at the beginning of struct sk_buff.  Doing so will make struct sk_buff
+ * incompatible with compiled binaries and may cause the binary to crash.
+ * -----------------------------------------------------------------------------
+ */
+#endif
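Because binary-only modules depend on these offsets, a compile-time guard can catch accidental reordering of the fkbuff-shadowing members. A minimal sketch, assuming CONFIG_BCM_KF_NBUFF is enabled; the check itself and the reference layout are illustrative, not part of this patch:

/* Hypothetical build-time guard: the members mirrored from struct
 * fkbuff must keep their relative layout (same-alignment members
 * pack without padding here, so each field directly follows the
 * previous one).
 */
static inline void my_skb_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, data) !=
		     offsetof(struct sk_buff, blog_p) + sizeof(void *));
	BUILD_BUG_ON(offsetof(struct sk_buff, len) !=
		     offsetof(struct sk_buff, data) + sizeof(unsigned char *));
}
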
 	/*
 	 * This is the control buffer. It is free to use for every
 	 * layer. Please put your private variables there. If you
 	 * want to keep them across layers you have to do a skb_clone()
 	 * first. This is owned by whoever has the skb queued ATM.
 	 */
+#if defined(CONFIG_BCM_KF_NBUFF)
+	char			cb[64] ____cacheline_aligned;
+#else
 	char			cb[48] __aligned(8);
+#endif
+#if defined(CONFIG_BCM_KF_WL)
+	union {
+		__u32 wl_cb[6];
+		struct {
+			/* pktc_cb must hold space for a void * and an unsigned int */
+			unsigned char pktc_cb[16];
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+			__u16 dma_index;	/* used by HND router for NIC bulk Tx */
+			__u16 pktc_flags;	/* wl_flags */
+			__u16 wl_flowid;	/* cfp flowid */
+			__u16 wl_rsvd;
+#else /* !CONFIG_CPU_BIG_ENDIAN */
+			__u16 pktc_flags;
+			__u16 dma_index;
+			__u16 wl_rsvd;
+			__u16 wl_flowid;
+#endif /* !CONFIG_CPU_BIG_ENDIAN */
+		};
+	} __aligned(8);
+#endif /* CONFIG_BCM_KF_WL */
 
 	unsigned long		_skb_refdst;
 	void			(*destructor)(struct sk_buff *skb);
+#if defined(CONFIG_BCM_KF_BIN_CONFIG_INDEP)
+	/* CONFIG_XFRM is toggled by BRCM_KERNEL_CRYPTO and causes a binary
+	 * incompatibility, shifting all following offsets.  This field is
+	 * therefore enabled unconditionally.
+	 */
+	struct	sec_path	*sp;
+#else
 #ifdef CONFIG_XFRM
 	struct	sec_path	*sp;
 #endif
+#endif /* CONFIG_BCM_KF_BIN_CONFIG_INDEP */
+#if defined(CONFIG_BCM_KF_NBUFF)
+#else /* CONFIG_BCM_KF_NBUFF */
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
 #endif
+#endif
+#if defined(CONFIG_BCM_KF_WL)
+	struct nf_bridge_info	*nf_bridge;
+#else
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	struct nf_bridge_info	*nf_bridge;
 #endif
+#endif
+#if defined(CONFIG_BCM_KF_NBUFF)
+	unsigned int		data_len;
+#else
 	unsigned int		len,
 				data_len;
+#endif
 	__u16			mac_len,
 				hdr_len;
@@ -616,6 +931,12 @@
 	__u8			ipvs_property:1;
 	__u8			inner_protocol_type:1;
 	__u8			remcsum_offload:1;
+#if defined(CONFIG_BCM_KF_MAP) && (defined(CONFIG_BCM_MAP) || defined(CONFIG_BCM_MAP_MODULE))
+	__u8			map_forward:2;
+	__u8			map_mf:1;
+	__u32			map_offset;
+	__u32			map_id;
+#endif
 	/* 3 or 5 bit hole */
 
 #ifdef CONFIG_NET_SCHED
@@ -632,7 +953,10 @@
 			__u16	csum_offset;
 		};
 	};
+#ifdef CONFIG_BCM_KF_NBUFF
+#else
 	__u32			priority;
+#endif
 	int			skb_iif;
 	__u32			hash;
 	__be16			vlan_proto;
@@ -646,11 +970,18 @@
 #ifdef CONFIG_NETWORK_SECMARK
 	__u32			secmark;
 #endif
+
+	/* AVM FRITZ!OS EXTENSION */
+	__u32			fritz_os_mark;	/* private mark used by FRITZ!OS */
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+	__u32			reserved_tailroom;
+#else
 	union {
 		__u32		mark;
 		__u32		reserved_tailroom;
 	};
-
+#endif /* CONFIG_BCM_KF_NBUFF */
 	union {
 		__be16		inner_protocol;
 		__u8		inner_ipproto;
@@ -668,12 +999,85 @@
 	/* private: */
 	__u32			headers_end[0];
 	/* public: */
+#if defined(CONFIG_BCM_KF_NBUFF)
+	/* Indicates to drivers (e.g. enet) the writable headroom in an
+	 * aggregated skb. */
+	unsigned char		*clone_wr_head;
+	/* Indicates to fcache the writable headroom in an aggregated skb. */
+	unsigned char		*clone_fc_head;
+
+	union {
+		unsigned int	vtag_word;
+		struct		{ unsigned short vtag, vtag_save; };
+	};
+	union {	/* CONFIG_NET_SCHED, CONFIG_NET_CLS_ACT */
+		unsigned int	tc_word;
+	};
+
+#endif /* CONFIG_BCM_KF_NBUFF */
+
+#if IS_ENABLED(CONFIG_AVM_NET_SKB_INPUT_DEV)
+	struct net_device	*input_dev;
+#endif
+
+#if defined(CONFIG_BCM_KF_NBUFF)
+#if defined(CONFIG_BCM_KF_VLAN) && (defined(CONFIG_BCM_VLAN) || defined(CONFIG_BCM_VLAN_MODULE))
+	__u16			vlan_count;
+	__u16			vlan_tpid;
+	__u32			cfi_save;
+	__u32			vlan_header[SKB_VLAN_MAX_TAGS];
+	struct net_device	*rxdev;
+#endif /* CONFIG_BCM_KF_VLAN */
+	union {
+		struct {
+			__u32	reserved:31;
+			__u32	restore_rx_vlan:1;	/* restore Rx VLAN at xmit; used in ONT mode */
+		};
+		__u32		bcm_flags_word;
+	} bcm_flags;
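The bcm_flags union pairs per-packet bitfields with a single word view, so individual flags can be tested by name while the whole set is cleared or copied in one store. A minimal sketch of the pattern; my_push_rx_vlan() is a hypothetical helper:

/* Hypothetical transmit-prepare step for a forwarding path */
static void my_xmit_prepare(struct sk_buff *skb)
{
	if (skb->bcm_flags.restore_rx_vlan)	/* test one named flag */
		my_push_rx_vlan(skb);

	skb->bcm_flags.bcm_flags_word = 0;	/* clear all flags at once */
}
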
+#if defined(CONFIG_BLOG_FEATURE)
+	union {
+		__u32	u32[BLOG_MAX_PARAM_NUM];
+		__u16	u16[BLOG_MAX_PARAM_NUM * 2];
+		__u8	u8[BLOG_MAX_PARAM_NUM * 4];
+	} ipt_log;
+	__u32	ipt_check;
+#define IPT_MATCH_LENGTH	(1 << 1)
+#define IPT_MATCH_TCP		(1 << 2)
+#define IPT_MATCH_UDP		(1 << 3)
+#define IPT_MATCH_TOS		(1 << 4)
+#define IPT_MATCH_DSCP		(1 << 5)
+#define IPT_TARGET_CLASSIFY	(1 << 6)
+#define IPT_TARGET_CONNMARK	(1 << 7)
+#define IPT_TARGET_CONNSECMARK	(1 << 8)
+#define IPT_TARGET_DSCP		(1 << 9)
+#define IPT_TARGET_HL		(1 << 10)
+#define IPT_TARGET_LED		(1 << 11)
+#define IPT_TARGET_MARK		(1 << 12)
+#define IPT_TARGET_NFLOG	(1 << 13)
+#define IPT_TARGET_NFQUEUE	(1 << 14)
+#define IPT_TARGET_NOTRACK	(1 << 15)
+#define IPT_TARGET_RATEEST	(1 << 16)
+#define IPT_TARGET_SECMARK	(1 << 17)
+#define IPT_TARGET_SKIPLOG	(1 << 18)
+#define IPT_TARGET_TCPMSS	(1 << 19)
+#define IPT_TARGET_TCPOPTSTRIP	(1 << 20)
+#define IPT_TARGET_TOS		(1 << 21)
+#define IPT_TARGET_TPROXY	(1 << 22)
+#define IPT_TARGET_TRACE	(1 << 23)
+#define IPT_TARGET_TTL		(1 << 24)
+#define IPT_TARGET_CHECK	(1 << 25)
+#endif
+	/* Physical device where this packet was received */
+	struct net_device	*in_dev;
+#endif
 
 	/* These elements must be at the end, see alloc_skb() for details.  */
 	sk_buff_data_t		tail;
 	sk_buff_data_t		end;
+#if defined(CONFIG_BCM_KF_NBUFF)
+	unsigned char		*head;
+#else
 	unsigned char		*head,
 				*data;
+#endif
 	unsigned int		truesize;
 	atomic_t		users;
 };
@@ -710,7 +1114,7 @@
  */
 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 {
-	/* If refdst was not refcounted, check we still are in a 
+	/* If refdst was not refcounted, check we still are in a
 	 * rcu_read_lock section
 	 */
 	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
@@ -762,6 +1166,43 @@
 	return (struct rtable *)skb_dst(skb);
 }
 
+#if defined(CONFIG_BCM_KF_WL)
+/**
+ * skb_cb_zero - zero out the skb's control buffers
+ * @skb: buffer
+ *
+ * Fast zeroing: ARMv7 uses ldmia/stmia against a 256B all-zeros global;
+ * ARMv8 uses stp with the zero register.
+ */
+static inline void skb_cb_zero(struct sk_buff *skb)
+{
+	unsigned long long *cb;	/* 8B assigns - ARMv8 stp + zero reg */
+
+	cb = (unsigned long long *)(&skb->cb[0]);	/* 64 bytes */
+
+	*(cb + 0) = 0ULL; *(cb + 1) = 0ULL;	/*   16 Bytes = 16 Bytes */
+	*(cb + 2) = 0ULL; *(cb + 3) = 0ULL;	/* + 16 Bytes = 32 Bytes */
+	*(cb + 4) = 0ULL; *(cb + 5) = 0ULL;	/* + 16 Bytes = 48 Bytes */
+#if defined(CONFIG_BCM_KF_NBUFF)
+	*(cb + 6) = 0ULL; *(cb + 7) = 0ULL;	/* + 16 Bytes = 64 Bytes */
+#endif
+
+#if defined(CONFIG_BCM_KF_WL)
+	cb = (unsigned long long *)(&skb->wl_cb[0]);	/* 24 bytes */
+
+	*(cb + 0) = 0ULL; *(cb + 1) = 0ULL;	/*   16 Bytes = 16 Bytes */
+	*(cb + 2) = 0ULL;			/* +  8 Bytes = 24 Bytes */
+#endif
+}
+#endif /* CONFIG_BCM_KF_WL */
+
+#if defined(CONFIG_BCM_KF_RUNNER)
+#if defined(CONFIG_BCM_RDPA) || defined(CONFIG_BCM_RDPA_MODULE)
+extern void bl_kfree_skb_structure(struct sk_buff *skb);
+extern void bl_kfree_skb_structure_irq(struct sk_buff *skb);
+#endif /* CONFIG_BCM_RDPA */
+#endif /* CONFIG_BCM_KF_RUNNER */
+
+
 void kfree_skb(struct sk_buff *skb);
 void kfree_skb_list(struct sk_buff *segs);
 void skb_tx_error(struct sk_buff *skb);
@@ -991,6 +1432,36 @@
 	return &skb_shinfo(skb)->hwtstamps;
 }
 
+#if defined(CONFIG_BCM_KF_NBUFF)
+/* Returns the size of struct sk_buff */
+extern size_t skb_size(void);
+extern size_t skb_aligned_size(void);
+extern int skb_layout_test(int head_offset, int tail_offset, int end_offset);
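skb_size() and skb_layout_test() appear intended for out-of-tree consumers to verify at load time that they were built against the same struct sk_buff layout as the running kernel. A minimal sketch of such a check, assuming skb_layout_test() returns nonzero when the offsets match; the module and its init function are hypothetical:

/* Hypothetical module init: refuse to load on an skb layout mismatch */
static int __init my_module_init(void)
{
	if (sizeof(struct sk_buff) != skb_size() ||
	    !skb_layout_test(offsetof(struct sk_buff, head),
			     offsetof(struct sk_buff, tail),
			     offsetof(struct sk_buff, end)))
		return -EINVAL;	/* built against a different layout */

	return 0;
}
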
+
+/**
+ * skb_headerinit - initialize a socket buffer header
+ * @headroom: reserved headroom size
+ * @datalen: data buffer size, data buffer is allocated by caller
+ * @skb: skb allocated by caller
+ * @data: data buffer allocated by caller
+ * @recycle_hook: callback function to free data buffer and skb
+ * @recycle_context: context value passed to @recycle_hook, param1
+ * @blog_p: pass a blog to an skb for logging
+ *
+ * Initializes the socket buffer and assigns the data buffer to it.
+ * Both the sk_buff and the pointed-to data buffer are pre-allocated.
+ */
+void skb_headerinit(unsigned int headroom, unsigned int datalen,
+		    struct sk_buff *skb, unsigned char *data,
+		    RecycleFuncP recycle_hook, unsigned long recycle_context,
+		    struct blog_t *blog_p);
+
+extern void skb_header_free(struct sk_buff *skb);
+
+#endif /* CONFIG_BCM_KF_NBUFF */
+
+
 /**
  * skb_queue_empty - check if a queue is empty
  * @list: queue head
@@ -1141,6 +1612,25 @@
 	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
 }
 
+#if defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION)
+/**
+ * skb_clone_headers_set - set clone_fc_head and clone_wr_head in an
+ *	aggregated skb (e.g. used in USBNET RX packet aggregation)
+ * @skb: buffer to operate on
+ * @len: length of the writable clone headroom
+ *
+ * When these pointers are set you can still modify a cloned packet and
+ * also expand it up to clone_wr_head.  This is used in packet-aggregation
+ * cases.
+ */
+static inline void skb_clone_headers_set(struct sk_buff *skb, unsigned int len)
+{
+	skb->clone_fc_head = skb->data - len;
+	if (skb_cloned(skb))
+		skb->clone_wr_head = skb->data - len;
+	else
+		skb->clone_wr_head = NULL;
+}
+#endif
 /**
  * __skb_header_release - release reference to header
  * @skb: buffer to operate on
@@ -1741,6 +2231,30 @@
 	return skb->data - skb->head;
 }
 
+#if defined(CONFIG_BCM_KF_USBNET) && defined(CONFIG_BCM_USBNET_ACCELERATION)
+/**
+ * skb_writable_headroom - bytes preceding skb->data that are writable,
+ *	even on some cloned skbs
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of writable free space preceding skb->data.
+ * Note: skb->clone_wr_head indicates the padding between two packets when
+ * multiple packets are present in the buffer pointed to by skb->head
+ * (e.g. used in USBNET RX packet aggregation).
+ */
+static inline unsigned int skb_writable_headroom(const struct sk_buff *skb)
+{
+	if (skb_cloned(skb)) {
+		if (skb->clone_wr_head)
+			return skb->data - skb->clone_wr_head;
+		else if (skb->clone_fc_head)
+			return 0;
+	}
+
+	return skb_headroom(skb);
+}
+#endif
+
 /**
  * skb_tailroom - bytes at buffer end
  * @skb: buffer to check
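Taken together, an aggregation driver clones packets out of one shared buffer, records the per-packet writable padding with skb_clone_headers_set(), and later stages query skb_writable_headroom() before pushing headers. A minimal sketch; the padding constant and helpers are hypothetical:

/* Hypothetical: extract one packet from an aggregated RX buffer */
#define MY_INTER_PKT_PAD	32	/* padding reserved between packets */

static struct sk_buff *my_extract_pkt(struct sk_buff *agg_skb)
{
	struct sk_buff *pkt = skb_clone(agg_skb, GFP_ATOMIC);

	if (!pkt)
		return NULL;

	/* the pad bytes ahead of skb->data stay writable in the clone */
	skb_clone_headers_set(pkt, MY_INTER_PKT_PAD);
	return pkt;
}

/* A later stage checks the room before pushing, e.g., a VLAN header */
static bool my_can_push(const struct sk_buff *pkt, unsigned int hdr_len)
{
	return skb_writable_headroom(pkt) >= hdr_len;
}
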
@@ -2531,11 +3045,11 @@
 	if (skb->ip_summed == CHECKSUM_NONE) {
 		__wsum csum = 0;
 		if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
-					    &csum, from) == copy) {
+					    &csum, from) == (unsigned int)copy) {
 			skb->csum = csum_block_add(skb->csum, csum, off);
 			return 0;
 		}
-	} else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
+	} else if (copy_from_iter(skb_put(skb, copy), copy, from) == (unsigned int)copy)
 		return 0;
 
 	__skb_trim(skb, off);
@@ -2549,7 +3063,7 @@
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
 
 		return page == skb_frag_page(frag) &&
-		       off == frag->page_offset + skb_frag_size(frag);
+		       off == (int)(frag->page_offset + skb_frag_size(frag));
 	}
 	return false;
 }
@@ -2735,12 +3249,12 @@
 
 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
 {
-	return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+	return copy_from_iter(data, len, &msg->msg_iter) == (unsigned int)len ?
+	       0 : -EFAULT;
 }
 
 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
 {
-	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+	return copy_to_iter(data, len, &msg->msg_iter) == (unsigned int)len ? 0 : -EFAULT;
 }
 
 struct skb_checksum_ops {
@@ -2872,11 +3386,11 @@
 
 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
 
-static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
+static inline void skb_clone_tx_timestamp(struct sk_buff *skb __maybe_unused)
 {
 }
 
-static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
+static inline bool skb_defer_rx_timestamp(struct sk_buff *skb __maybe_unused)
 {
 	return false;
 }
@@ -3090,7 +3604,7 @@
 	return 0;
 }
 
-static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
+static inline __wsum null_compute_pseudo(struct sk_buff *skb __maybe_unused, int proto __maybe_unused)
 {
 	return 0;
 }
@@ -3139,7 +3653,7 @@
 }
 
 static inline void __skb_checksum_convert(struct sk_buff *skb,
-					  __sum16 check, __wsum pseudo)
+					  __sum16 check __maybe_unused, __wsum pseudo)
 {
 	skb->csum = ~pseudo;
 	skb->ip_summed = CHECKSUM_COMPLETE;
@@ -3211,7 +3725,7 @@
 		atomic_inc(&nf_bridge->use);
 }
 #endif /* CONFIG_BRIDGE_NETFILTER */
-static inline void nf_reset(struct sk_buff *skb)
+static inline void nf_reset(struct sk_buff *skb __maybe_unused)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb->nfct);
@@ -3223,7 +3737,7 @@
 #endif
 }
 
-static inline void nf_reset_trace(struct sk_buff *skb)
+static inline void nf_reset_trace(struct sk_buff *skb __maybe_unused)
 {
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
 	skb->nf_trace = 0;
@@ -3238,8 +3752,8 @@
 }
 
 /* Note: This doesn't put any conntrack and bridge info in dst. */
-static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
-			     bool copy)
+static inline void __nf_copy(struct sk_buff *dst __maybe_unused, const struct sk_buff *src __maybe_unused,
+			     bool copy __maybe_unused)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	dst->nfct = src->nfct;
@@ -3257,7 +3771,7 @@
 #endif
 }
 
-static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+static inline void nf_copy(struct sk_buff *dst __maybe_unused, const struct sk_buff *src __maybe_unused)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(dst->nfct);
@@ -3279,10 +3793,11 @@
 	skb->secmark = 0;
 }
 #else
-static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
+static inline void skb_copy_secmark(struct sk_buff *to __maybe_unused,
+				    const struct sk_buff *from __maybe_unused)
 { }
 
-static inline void skb_init_secmark(struct sk_buff *skb)
+static inline void skb_init_secmark(struct sk_buff *skb __maybe_unused)
 { }
 #endif
@@ -3332,7 +3847,7 @@
 u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
 		  unsigned int num_tx_queues);
 
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb __maybe_unused)
 {
 #ifdef CONFIG_XFRM
 	return skb->sp;
@@ -3440,7 +3955,8 @@
  * Instead of forcing ip_summed to CHECKSUM_NONE, we can
  * use this helper, to document places where we make this assertion.
  */
-static inline void skb_checksum_none_assert(const struct sk_buff *skb)
+static inline void skb_checksum_none_assert(const struct sk_buff *skb
+					    __maybe_unused)
 {
 #ifdef DEBUG
 	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
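
Most of the remaining hunks only add __maybe_unused to parameters that are referenced solely inside conditional blocks, keeping -Wunused-parameter quiet in configurations where the function body compiles away; the (unsigned int) and (int) casts likewise silence -Wsign-compare on the iter-copy return values. An illustration of the __maybe_unused pattern, not taken from this patch:

/* Illustration: in configs without CONFIG_MY_FEATURE the body is empty,
 * and without __maybe_unused the compiler would warn that 'skb' is unused. */
static inline void my_feature_reset(struct sk_buff *skb __maybe_unused)
{
#ifdef CONFIG_MY_FEATURE
	skb->my_feature_state = 0;	/* hypothetical field */
#endif
}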