--- zzzz-none-000/linux-3.10.107/net/ipv4/tcp_output.c	2017-06-27 09:49:32.000000000 +0000
+++ vr9-7490-729/linux-3.10.107/net/ipv4/tcp_output.c	2021-11-10 11:53:56.000000000 +0000
@@ -901,6 +901,9 @@
 	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = tcp_wfree;
+#ifdef CONFIG_NET_SCHED
+	skb->tc_index = sk->sk_tc_index;
+#endif
 	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
 	/* Build TCP header and checksum it. */
@@ -1073,6 +1076,11 @@
 	if (nsize < 0)
 		nsize = 0;
 
+	if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+		return -ENOMEM;
+	}
+
 	if (skb_unclone(skb, GFP_ATOMIC))
 		return -ENOMEM;
 
@@ -1117,6 +1125,9 @@
 	/* Looks stupid, but our code really uses when of
 	 * skbs, which it never sent before. --ANK
 	 */
+#ifdef CONFIG_NET_SCHED
+	skb->tc_index = sk->sk_tc_index;
+#endif
 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
 	buff->tstamp = skb->tstamp;
 
@@ -1235,8 +1246,7 @@
 	mss_now -= icsk->icsk_ext_hdr_len;
 
 	/* Then reserve room for full set of TCP options and 8 bytes of data */
-	if (mss_now < 48)
-		mss_now = 48;
+	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
 	return mss_now;
 }
 
@@ -1636,7 +1646,7 @@
 
 	/* If a full-sized TSO skb can be sent, do it. */
 	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
-			   tp->xmit_size_goal_segs * tp->mss_cache))
+			   tp->gso_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */