--- zzzz-none-000/linux-4.19.183/net/ipv4/tcp_timer.c	2021-03-24 10:07:39.000000000 +0000
+++ bcm63-7530ax-756/linux-4.19.183/net/ipv4/tcp_timer.c	2023-06-28 08:54:21.000000000 +0000
@@ -20,6 +20,9 @@
 
 #include <linux/module.h>
 #include <linux/gfp.h>
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#include <net/mptcp.h>
+#endif
 #include <net/tcp.h>
 
 static u32 tcp_retransmit_stamp(const struct sock *sk)
@@ -58,7 +61,11 @@
  *
  *  Returns: Nothing (void)
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_write_err(struct sock *sk)
+#else
+void tcp_write_err(struct sock *sk)
+#endif
 {
 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
 	sk->sk_error_report(sk);
@@ -114,7 +121,11 @@
 		    (!tp->snd_wnd && !tp->packets_out))
 			do_reset = true;
 		if (do_reset)
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
+#else
+			tp->ops->send_active_reset(sk, GFP_ATOMIC);
+#endif
 		tcp_done(sk);
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
@@ -186,9 +197,15 @@
  * after "boundary" unsuccessful, exponentially backed-off
  * retransmissions with an initial RTO of TCP_RTO_MIN.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static bool retransmits_timed_out(struct sock *sk,
 				  unsigned int boundary,
 				  unsigned int timeout)
+#else
+bool retransmits_timed_out(struct sock *sk,
+			   unsigned int boundary,
+			   unsigned int timeout)
+#endif
 {
 	const unsigned int rto_base = TCP_RTO_MIN;
 	unsigned int linear_backoff_thresh, start_ts;
@@ -214,7 +231,11 @@
 }
 
 /* A write timeout has occurred. Process the after effects. */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int tcp_write_timeout(struct sock *sk)
+#else
+int tcp_write_timeout(struct sock *sk)
+#endif
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -229,6 +250,19 @@
 			sk_rethink_txhash(sk);
 		}
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+#ifdef CONFIG_MPTCP
+		/* Stop retransmitting MP_CAPABLE options in SYN if timed out. */
+		if (tcp_sk(sk)->request_mptcp &&
+		    icsk->icsk_retransmits >= sysctl_mptcp_syn_retries) {
+			tcp_sk(sk)->request_mptcp = 0;
+
+			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLERETRANSFALLBACK);
+		}
+#endif /* CONFIG_MPTCP */
+
+#endif
 		expired = icsk->icsk_retransmits >= retry_until;
 	} else {
 		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
@@ -324,18 +358,39 @@
 	struct inet_connection_sock *icsk =
 			from_timer(icsk, t, icsk_delack_timer);
 	struct sock *sk = &icsk->icsk_inet.sk;
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sock *meta_sk = mptcp(tp) ? mptcp_meta_sk(sk) : sk;
+#endif
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk)) {
+#else
+	bh_lock_sock(meta_sk);
+	if (!sock_owned_by_user(meta_sk)) {
+#endif
 		tcp_delack_timer_handler(sk);
 	} else {
 		icsk->icsk_ack.blocked = 1;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+#else
+		__NET_INC_STATS(sock_net(meta_sk), LINUX_MIB_DELAYEDACKLOCKED);
+#endif
 		/* deleguate our work to tcp_release_cb() */
 		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+		if (mptcp(tp))
+			mptcp_tsq_flags(sk);
+#endif
 	}
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_unlock_sock(sk);
+#else
+	bh_unlock_sock(meta_sk);
+#endif
 	sock_put(sk);
 }
@@ -379,7 +434,16 @@
 	}
 
 	if (icsk->icsk_probes_out >= max_probes) {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 abort:		tcp_write_err(sk);
+#else
+abort:
+		tcp_write_err(sk);
+		if (is_meta_sk(sk) &&
+		    mptcp_in_infinite_mapping_weak(tp->mpcb)) {
+			mptcp_sub_force_close_all(tp->mpcb, NULL);
+		}
+#endif
 	} else {
 		/* Only send another probe if we didn't close things up. */
 		tcp_send_probe0(sk);
@@ -595,7 +659,11 @@
 		break;
 	case ICSK_TIME_RETRANS:
 		icsk->icsk_pending = 0;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		tcp_retransmit_timer(sk);
+#else
+		tcp_sk(sk)->ops->retransmit_timer(sk);
+#endif
 		break;
 	case ICSK_TIME_PROBE0:
 		icsk->icsk_pending = 0;
@@ -612,16 +680,32 @@
 	struct inet_connection_sock *icsk =
 			from_timer(icsk, t, icsk_retransmit_timer);
 	struct sock *sk = &icsk->icsk_inet.sk;
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	struct sock *meta_sk = mptcp(tcp_sk(sk)) ? mptcp_meta_sk(sk) : sk;
+#endif
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk)) {
+#else
	bh_lock_sock(meta_sk);
+	if (!sock_owned_by_user(meta_sk)) {
+#endif
 		tcp_write_timer_handler(sk);
 	} else {
 		/* delegate our work to tcp_release_cb() */
 		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
 			sock_hold(sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+		if (mptcp(tcp_sk(sk)))
+			mptcp_tsq_flags(sk);
+#endif
 	}
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_unlock_sock(sk);
+#else
+	bh_unlock_sock(meta_sk);
+#endif
 	sock_put(sk);
 }
@@ -651,11 +735,19 @@
 	struct sock *sk = from_timer(sk, t, sk_timer);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	struct sock *meta_sk = mptcp(tp) ? mptcp_meta_sk(sk) : sk;
+#endif
 	u32 elapsed;
 
 	/* Only process if socket is not in use. */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
+#else
+	bh_lock_sock(meta_sk);
+	if (sock_owned_by_user(meta_sk)) {
+#endif
 		/* Try again later. */
 		inet_csk_reset_keepalive_timer (sk, HZ/20);
 		goto out;
@@ -667,16 +759,41 @@
 	}
 
 	tcp_mstamp_refresh(tp);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+	if (tp->send_mp_fclose) {
+		if (icsk->icsk_retransmits >= MPTCP_FASTCLOSE_RETRIES) {
+			tcp_write_err(sk);
+			goto out;
+		}
+
+		tcp_send_ack(sk);
+		icsk->icsk_retransmits++;
+
+		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+		elapsed = icsk->icsk_rto;
+		goto resched;
+	}
+
+#endif
 	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
 		if (tp->linger2 >= 0) {
 			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
 
 			if (tmo > 0) {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+#else
+				tp->ops->time_wait(sk, TCP_FIN_WAIT2, tmo);
+#endif
 				goto out;
 			}
 		}
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		tcp_send_active_reset(sk, GFP_ATOMIC);
+#else
+		tp->ops->send_active_reset(sk, GFP_ATOMIC);
+#endif
 		goto death;
 	}
@@ -701,11 +818,19 @@
 		    icsk->icsk_probes_out > 0) ||
 		    (icsk->icsk_user_timeout == 0 &&
 		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
+#else
+			tp->ops->send_active_reset(sk, GFP_ATOMIC);
+#endif
 			tcp_write_err(sk);
 			goto out;
 		}
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
+#else
+		if (tp->ops->write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
+#endif
 			icsk->icsk_probes_out++;
 			elapsed = keepalive_intvl_when(tp);
 		} else {
@@ -729,7 +854,11 @@
 	tcp_done(sk);
 
 out:
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_unlock_sock(sk);
+#else
+	bh_unlock_sock(meta_sk);
+#endif
 	sock_put(sk);
 }
 