--- zzzz-none-000/linux-2.6.39.4/include/net/sock.h 2011-08-03 19:43:28.000000000 +0000
+++ puma6-atom-6490-729/linux-2.6.39.4/include/net/sock.h 2021-11-10 13:38:18.000000000 +0000
@@ -338,6 +338,7 @@
 #endif
 	__u32			sk_mark;
 	u32			sk_classid;
+	unsigned long		sk_tc_index;
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk, int bytes);
 	void			(*sk_write_space)(struct sock *sk);
@@ -641,7 +642,7 @@
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize + skb->truesize > sk->sk_rcvbuf;
+	return qsize + skb->truesize > (unsigned int)sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
@@ -660,7 +661,7 @@
 	return sk->sk_backlog_rcv(sk, skb);
 }
 
-static inline void sock_rps_record_flow(const struct sock *sk)
+static inline void sock_rps_record_flow(const struct sock *sk __attribute__((unused)))
 {
 #ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
@@ -672,7 +673,7 @@
 #endif
 }
 
-static inline void sock_rps_reset_flow(const struct sock *sk)
+static inline void sock_rps_reset_flow(const struct sock *sk __attribute__((unused)))
 {
 #ifdef CONFIG_RPS
 	struct rps_sock_flow_table *sock_flow_table;
@@ -684,7 +685,7 @@
 #endif
 }
 
-static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
+static inline void sock_rps_save_rxhash(struct sock *sk __attribute__((unused)), u32 rxhash __attribute__((unused)))
 {
 #ifdef CONFIG_RPS
 	if (unlikely(sk->sk_rxhash != rxhash)) {
@@ -1114,7 +1115,7 @@
 #ifdef CONFIG_CGROUPS
 extern void sock_update_classid(struct sock *sk);
 #else
-static inline void sock_update_classid(struct sock *sk)
+static inline void sock_update_classid(struct sock *sk __attribute__((unused)))
 {
 }
 #endif
@@ -1527,6 +1528,9 @@
 	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sock_wfree;
+#ifdef CONFIG_NET_SCHED
+	skb->tc_index = sk->sk_tc_index;
+#endif
 	/*
 	 * We used to take a refcount on sk, but following operation
 	 * is enough to guarantee sk_free() wont free this sock until
@@ -1728,7 +1732,7 @@
 	__skb_queue_tail(&sk->sk_async_wait_queue, skb);
 }
 #else
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early __attribute__((unused)))
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
 	__kfree_skb(skb);
@@ -1736,13 +1740,13 @@
 #endif
 
 static inline
-struct net *sock_net(const struct sock *sk)
+struct net *sock_net(const struct sock *sk __attribute__((unused)))
 {
 	return read_pnet(&sk->sk_net);
 }
 
 static inline
-void sock_net_set(struct sock *sk, struct net *net)
+void sock_net_set(struct sock *sk __attribute__((unused)), struct net *net)
 {
 	write_pnet(&sk->sk_net, net);
 }
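
Aside from the warning-suppression __attribute__((unused)) annotations and the (unsigned int)sk->sk_rcvbuf cast, the substantive change in this header is the new per-socket sk_tc_index field and its copy into skb->tc_index inside skb_set_owner_w(), so that every outgoing skb owned by a socket inherits that socket's traffic-control index when CONFIG_NET_SCHED is enabled. The snippet below is a minimal stand-alone sketch of that propagation pattern only: struct sock, struct sk_buff and the set_owner_w() helper here are simplified stand-ins for illustration, not the kernel definitions, and nothing in this header diff shows how sk_tc_index itself is populated.

/*
 * Minimal stand-alone model of the tc_index propagation added to
 * skb_set_owner_w() above.  The structs are reduced mock-ups for
 * illustration, not the kernel types.
 * Build with:  cc -DCONFIG_NET_SCHED -o tc_model tc_model.c
 */
#include <stdio.h>

struct sock {
	unsigned long sk_tc_index;	/* per-socket tc index (the new field) */
};

struct sk_buff {
	struct sock *sk;		/* owning socket */
	unsigned short tc_index;	/* index consumed by packet schedulers */
};

/* Mirrors the patched hunk: attach the owner and, when traffic control
 * is compiled in, stamp the buffer with the socket's index. */
static void set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = (unsigned short)sk->sk_tc_index;
#endif
}

int main(void)
{
	struct sock sk = { .sk_tc_index = 5 };
	struct sk_buff skb = { 0 };

	set_owner_w(&skb, &sk);
	printf("skb.tc_index = %u\n", (unsigned int)skb.tc_index);
	return 0;
}

Built without -DCONFIG_NET_SCHED the copy is compiled out and the index stays 0, which matches how the kernel guards the assignment with CONFIG_NET_SCHED.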