--- zzzz-none-000/linux-4.19.183/net/ipv4/tcp_output.c	2021-03-24 10:07:39.000000000 +0000
+++ bcm63-7530ax-756/linux-4.19.183/net/ipv4/tcp_output.c	2023-06-28 08:54:21.000000000 +0000
@@ -36,6 +36,14 @@
 
 #define pr_fmt(fmt) "TCP: " fmt
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#include <net/mptcp.h>
+#include <net/mptcp_v4.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/mptcp_v6.h>
+#endif
+#include <net/ipv6.h>
+#endif
 #include <net/tcp.h>
 
 #include <linux/compiler.h>
@@ -45,11 +53,17 @@
 
 #include <trace/events/tcp.h>
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 			   int push_one, gfp_t gfp);
+#endif
 
 /* Account for new data that has been sent to the network. */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
+#else
+void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
+#endif
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -242,12 +256,25 @@
  * value can be stuffed directly into th->window for an outgoing
  * frame.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static u16 tcp_select_window(struct sock *sk)
+#else
+u16 tcp_select_window(struct sock *sk)
+#endif
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 old_win = tp->rcv_wnd;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	u32 cur_win = tcp_receive_window(tp);
 	u32 new_win = __tcp_select_window(sk);
+#else
+	/* The window must never shrink at the meta-level. At the subflow we
+	 * have to allow this. Otherwise we may announce a window too large
+	 * for the current meta-level sk_rcvbuf.
+	 */
+	u32 cur_win = tcp_receive_window(mptcp(tp) ? tcp_sk(mptcp_meta_sk(sk)) : tp);
+	u32 new_win = tp->ops->__select_window(sk);
+#endif
 
 	/* Never shrink the offered window */
 	if (new_win < cur_win) {
@@ -375,7 +402,11 @@
 
 /* Constructs common control bits of non-data skb. If SYN/FIN is present,
  * auto increment end seqno.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
+#else
+void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
+#endif
 {
 	skb->ip_summed = CHECKSUM_PARTIAL;
@@ -390,7 +421,11 @@
 	TCP_SKB_CB(skb)->end_seq = seq;
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static inline bool tcp_urg_mode(const struct tcp_sock *tp)
+#else
+bool tcp_urg_mode(const struct tcp_sock *tp)
+#endif
 {
 	return tp->snd_una != tp->snd_up;
 }
@@ -401,6 +436,9 @@
 #define OPTION_WSCALE		(1 << 3)
 #define OPTION_FAST_OPEN_COOKIE	(1 << 8)
 #define OPTION_SMC		(1 << 9)
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+/* Before adding here - take a look at OPTION_MPTCP in include/net/mptcp.h */
+#endif
 
 static void smc_options_write(__be32 *ptr, u16 *options)
 {
@@ -417,6 +455,7 @@
 #endif
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 struct tcp_out_options {
 	u16 options;		/* bit field of OPTION_* */
 	u16 mss;		/* 0 to disable */
@@ -428,6 +467,7 @@
 	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
 };
 
+#endif
 /* Write previously computed TCP options to the packet.
  *
  * Beware: Something in the Internet is very sensitive to the ordering of
@@ -442,7 +482,11 @@
  * (but it may well be that other scenarios fail similarly).
  */
 static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			      struct tcp_out_options *opts)
+#else
+			      struct tcp_out_options *opts, struct sk_buff *skb)
+#endif
 {
 	u16 options = opts->options;	/* mungable copy */
 
@@ -536,6 +580,11 @@
 	}
 
 	smc_options_write(ptr, &options);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+	if (unlikely(OPTION_MPTCP & opts->options))
+		mptcp_options_write(ptr, tp, opts, skb);
+#endif
 }
 
 static void smc_set_option(const struct tcp_sock *tp,
@@ -621,6 +670,10 @@
 		if (unlikely(!(OPTION_TS & opts->options)))
 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 	}
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (tp->request_mptcp || mptcp(tp))
+		mptcp_syn_options(sk, opts, &remaining);
+#endif
 
 	if (fastopen && fastopen->cookie.len >= 0) {
 		u32 need = fastopen->cookie.len;
@@ -704,6 +757,11 @@
 
 	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (ireq->saw_mpc)
+		mptcp_synack_options(req, opts, &remaining);
+
+#endif
 	return MAX_TCP_OPTION_SPACE - remaining;
 }
 
@@ -737,9 +795,14 @@
 		opts->tsecr = tp->rx_opt.ts_recent;
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (mptcp(tp))
+		mptcp_established_options(sk, skb, opts, &size);
+#endif
 
 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 	if (unlikely(eff_sacks)) {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 		opts->num_sack_blocks =
 			min_t(unsigned int, eff_sacks,
@@ -748,6 +811,19 @@
 		if (likely(opts->num_sack_blocks))
 			size += TCPOLEN_SACK_BASE_ALIGNED +
 				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+#else
+		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
+		if (remaining < TCPOLEN_SACK_BASE_ALIGNED)
+			opts->num_sack_blocks = 0;
+		else
+			opts->num_sack_blocks =
+				min_t(unsigned int, eff_sacks,
+				      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
+				      TCPOLEN_SACK_PERBLOCK);
+		if (opts->num_sack_blocks)
+			size += TCPOLEN_SACK_BASE_ALIGNED +
+				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+#endif
 	}
 
 	return size;
@@ -787,19 +863,47 @@
 		tcp_xmit_retransmit_queue(sk);
 	}
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
 		       0, GFP_ATOMIC);
+#else
+	tcp_sk(sk)->ops->write_xmit(sk, tcp_current_mss(sk),
+				    tcp_sk(sk)->nonagle, 0, GFP_ATOMIC);
+#endif
 	}
 }
 
 static void tcp_tsq_handler(struct sock *sk)
 {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
+#else
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sock *meta_sk = mptcp(tp) ? mptcp_meta_sk(sk) : sk;
+
+	bh_lock_sock(meta_sk);
+	if (!sock_owned_by_user(meta_sk)) {
+#endif
 		tcp_tsq_write(sk);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
 		sock_hold(sk);
 	bh_unlock_sock(sk);
+#else
+
+	if (mptcp(tp))
+		tcp_tsq_write(meta_sk);
+	} else {
+		if (!test_and_set_bit(TCP_TSQ_DEFERRED, &meta_sk->sk_tsq_flags))
+			sock_hold(meta_sk);
+
+		if ((mptcp(tp)) && (sk->sk_state != TCP_CLOSE))
+			mptcp_tsq_flags(sk);
+	}
+
+	bh_unlock_sock(meta_sk);
+#endif
 }
 /*
  * One tasklet per cpu tries to send more skbs.
@@ -833,10 +937,19 @@
 	}
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
 			  TCPF_WRITE_TIMER_DEFERRED |	\
 			  TCPF_DELACK_TIMER_DEFERRED |	\
 			  TCPF_MTU_REDUCED_DEFERRED)
+#else
+#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
+			  TCPF_WRITE_TIMER_DEFERRED |	\
+			  TCPF_DELACK_TIMER_DEFERRED |	\
+			  TCPF_MTU_REDUCED_DEFERRED |	\
+			  TCPF_PATH_MANAGER_DEFERRED |\
+			  TCPF_SUB_DEFERRED)
+#endif
 
 /**
  * tcp_release_cb - tcp release_sock() callback
  * @sk: socket
@@ -859,6 +972,11 @@
 	if (flags & TCPF_TSQ_DEFERRED) {
 		tcp_tsq_write(sk);
 		__sock_put(sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+		if (mptcp(tcp_sk(sk)))
+			tcp_tsq_write(mptcp_meta_sk(sk));
+#endif
 	}
 	/* Here begins the tricky part :
 	 * We are called from release_sock() with :
@@ -883,6 +1001,15 @@
 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
 		__sock_put(sk);
 	}
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (flags & TCPF_PATH_MANAGER_DEFERRED) {
+		if (tcp_sk(sk)->mpcb->pm_ops->release_sock)
+			tcp_sk(sk)->mpcb->pm_ops->release_sock(sk);
+		__sock_put(sk);
+	}
+	if (flags & TCPF_SUB_DEFERRED)
+		mptcp_tsq_sub_deferred(sk);
+#endif
 }
 EXPORT_SYMBOL(tcp_release_cb);
 
@@ -985,7 +1112,11 @@
 	sock_hold(sk);
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
+#else
+void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
+#endif
 {
 	skb->skb_mstamp = tp->tcp_mstamp;
 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
@@ -1074,6 +1205,10 @@
 
 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
 
+#ifdef CONFIG_AVM_SK_TC_INDEX
+	skb->tc_index = sk->sk_tc_index;
+#endif
+
 	/* Build TCP header and checksum it. */
 	th = (struct tcphdr *)skb->data;
 	th->source		= inet->inet_sport;
@@ -1097,10 +1232,18 @@
 		}
 	}
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
+#else
+	tcp_options_write((__be32 *)(th + 1), tp, &opts, skb);
+#endif
 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		th->window      = htons(tcp_select_window(sk));
+#else
+		th->window	= htons(tp->ops->select_window(sk));
+#endif
 		tcp_ecn_send(sk, skb, th, tcp_header_size);
 	} else {
 		/* RFC1323: The window in SYN & SYN/ACK segments
@@ -1158,8 +1301,13 @@
 	return err;
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 			    gfp_t gfp_mask)
+#else
+int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+		     gfp_t gfp_mask)
+#endif
 {
 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
 				  tcp_sk(sk)->rcv_nxt);
@@ -1170,7 +1318,11 @@
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
  * otherwise socket can stall.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
+#else
+void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
+#endif
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1183,7 +1335,11 @@
 }
 
 /* Initialize TSO segments for a packet.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
+#else
+void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
+#endif
 {
 	if (skb->len <= mss_now) {
 		/* Avoid the costly divide in the normal
@@ -1200,7 +1356,11 @@
 /* Pcount in the middle of the write queue got changed, we need to do various
  * tweaks to fix counters
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
+#else
+void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
+#endif
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1317,6 +1477,10 @@
 	buff->truesize += nlen;
 	skb->truesize -= nlen;
 
+#ifdef CONFIG_AVM_SK_TC_INDEX
+	skb->tc_index = sk->sk_tc_index;
+#endif
+
 	/* Correct the sequence numbers. */
 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1368,7 +1532,11 @@
 /* This is similar to __pskb_pull_tail(). The difference is that pulled
  * data is not copied, but immediately discarded.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int __pskb_trim_head(struct sk_buff *skb, int len)
+#else
+int __pskb_trim_head(struct sk_buff *skb, int len)
+#endif
 {
 	struct skb_shared_info *shinfo;
 	int i, k, eat;
@@ -1590,6 +1758,9 @@
 
 	return mss_now;
 }
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+EXPORT_SYMBOL(tcp_current_mss);
+#endif
 
 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
  * As additional protections, we do not touch cwnd in retransmission phases,
@@ -1613,7 +1784,11 @@
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
+#else
+void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
+#endif
 {
 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1672,8 +1847,13 @@
  * But we can avoid doing the divide again given we already have
  *  skb_pcount = skb->len / mss_now
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
 				const struct sk_buff *skb)
+#else
+void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
+			 const struct sk_buff *skb)
+#endif
 {
 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
@@ -1732,11 +1912,19 @@
 }
 
 /* Returns the portion of skb which can be sent right away */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static unsigned int tcp_mss_split_point(const struct sock *sk,
 					const struct sk_buff *skb,
 					unsigned int mss_now,
 					unsigned int max_segs,
 					int nonagle)
+#else
+unsigned int tcp_mss_split_point(const struct sock *sk,
+				 const struct sk_buff *skb,
+				 unsigned int mss_now,
+				 unsigned int max_segs,
+				 int nonagle)
+#endif
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	u32 partial, needed, window, max_len;
@@ -1766,13 +1954,23 @@
 
 /* Can at least one segment of SKB be sent right now, according to the
  * congestion window rules?  If so, return how many segments are allowed.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
 					 const struct sk_buff *skb)
+#else
+unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
+			   const struct sk_buff *skb)
+#endif
 {
 	u32 in_flight, cwnd, halfcwnd;
 
 	/* Don't be strict about the congestion window for the final FIN.  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
+#else
	if (skb &&
+	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
+#endif
 	    tcp_skb_pcount(skb) == 1)
 		return 1;
@@ -1787,12 +1985,19 @@
 	halfcwnd = max(cwnd >> 1, 1U);
 	return min(halfcwnd, cwnd - in_flight);
 }
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+EXPORT_SYMBOL(tcp_cwnd_test);
+#endif
 
 /* Initialize TSO state of a skb.
  * This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
+#else
+int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
+#endif
 {
 	int tso_segs = tcp_skb_pcount(skb);
@@ -1807,8 +2012,13 @@
 /* Return true if the Nagle test allows this packet to be
  * sent now.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
 				  unsigned int cur_mss, int nonagle)
+#else
+bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+		    unsigned int cur_mss, int nonagle)
+#endif
 {
 	/* Nagle rule does not apply to frames, which sit in the middle of the
 	 * write_queue (they have no chances to get new data).
@@ -1820,7 +2030,12 @@
 		return true;
 
 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
+#else
+	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
+	    mptcp_is_data_fin(skb))
+#endif
 		return true;
 
 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
@@ -1830,9 +2045,14 @@
 }
 
 /* Does at least the first segment of SKB fit into the send window? */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
 			     const struct sk_buff *skb,
 			     unsigned int cur_mss)
+#else
+bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+		      unsigned int cur_mss)
+#endif
 {
 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1841,6 +2061,9 @@
 
 	return !after(end_seq, tcp_wnd_end(tp));
 }
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+EXPORT_SYMBOL(tcp_snd_wnd_test);
+#endif
 
 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
  * which is put after SKB on the list.  It is very much like
@@ -1993,7 +2216,11 @@
 	}
 
 	/* If this packet won't get more data, do not wait.
 	 */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+#else
+	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb))
+#endif
 		goto send_now;
 
 	return true;
@@ -2213,6 +2440,7 @@
 static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 				  unsigned int factor)
 {
+#if !defined(CONFIG_BCM_KF_TCP_NO_TSQ)
 	unsigned int limit;
 
 	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift);
@@ -2238,6 +2466,7 @@
 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
 			return true;
 	}
+#endif
 	return false;
 }
 
@@ -2297,7 +2526,11 @@
  * Returns true, if no segments are in flight and we have queued segments,
  * but cannot send anything now because of SWS or another problem.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+#else
+bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+#endif
 			   int push_one, gfp_t gfp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2311,7 +2544,16 @@
 	sent_pkts = 0;
 
 	tcp_mstamp_refresh(tp);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (!push_one) {
+#else
+
+	/* pmtu not yet supported with MPTCP. Should be possible, by early
+	 * exiting the loop inside tcp_mtu_probe, making sure that only one
+	 * single DSS-mapping gets probed.
+	 */
+	if (!push_one && !mptcp(tp)) {
+#endif
 		/* Do MTU probing. */
 		result = tcp_mtu_probe(sk);
 		if (!result) {
@@ -2409,8 +2651,14 @@
 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
 
 	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (likely(sent_pkts || is_cwnd_limited))
 		tcp_cwnd_validate(sk, is_cwnd_limited);
+#else
+	if (tp->ops->cwnd_validate)
+		tp->ops->cwnd_validate(sk, is_cwnd_limited);
+#endif
 
 	if (likely(sent_pkts)) {
 		if (tcp_in_cwnd_reduction(sk))
@@ -2506,7 +2754,11 @@
 	skb = tcp_send_head(sk);
 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
 		pcount = tp->packets_out;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+#else
+		tp->ops->write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+#endif
 		if (tp->packets_out > pcount)
 			goto probe_sent;
 		goto rearm_timer;
@@ -2568,8 +2820,13 @@
 	if (unlikely(sk->sk_state == TCP_CLOSE))
 		return;
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
 			   sk_gfp_mask(sk, GFP_ATOMIC)))
+#else
+	if (tcp_sk(sk)->ops->write_xmit(sk, cur_mss, nonagle, 0,
+					sk_gfp_mask(sk, GFP_ATOMIC)))
+#endif
 		tcp_check_probe_timer(sk);
 }
 
@@ -2582,7 +2839,12 @@
 
 	BUG_ON(!skb || skb->len < mss_now);
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
+#else
+	tcp_sk(sk)->ops->write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1,
+				    sk->sk_allocation);
+#endif
 }
 
 /* This function returns the amount that we can raise the
@@ -2804,6 +3066,12 @@
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
 		return;
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	/* Currently not supported for MPTCP - but it should be possible */
+	if (mptcp(tp))
+		return;
+
+#endif
 	skb_rbtree_walk_from_safe(skb, tmp) {
 		if (!tcp_can_collapse(sk, skb))
 			break;
@@ -3274,7 +3542,11 @@
 
 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled.
 	 */
 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
+#else
+	tcp_options_write((__be32 *)(th + 1), NULL, &opts, skb);
+#endif
 	th->doff = (tcp_header_size >> 2);
 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
@@ -3355,6 +3627,7 @@
 	if (rcv_wnd == 0)
 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	tcp_select_initial_window(sk, tcp_full_space(sk),
 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
 				  &tp->rcv_wnd,
@@ -3362,6 +3635,15 @@
 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
 				  &rcv_wscale,
 				  rcv_wnd);
+#else
+	tp->ops->select_initial_window(sk, tcp_full_space(sk),
+				       tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
+				       &tp->rcv_wnd,
+				       &tp->window_clamp,
+				       sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
+				       &rcv_wscale,
+				       rcv_wnd);
+#endif
 	tp->rx_opt.rcv_wscale = rcv_wscale;
 	tp->rcv_ssthresh = tp->rcv_wnd;
@@ -3386,6 +3668,38 @@
 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
 	inet_csk(sk)->icsk_retransmits = 0;
 	tcp_clear_retrans(tp);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+#ifdef CONFIG_MPTCP
+	if (sock_flag(sk, SOCK_MPTCP) && mptcp_doit(sk)) {
+		if (is_master_tp(tp)) {
+			tp->request_mptcp = 1;
+			mptcp_connect_init(sk);
+		} else if (tp->mptcp) {
+			struct inet_sock *inet	= inet_sk(sk);
+
+			tp->mptcp->snt_isn	= tp->write_seq;
+			tp->mptcp->init_rcv_wnd	= tp->rcv_wnd;
+
+			/* Set nonce for new subflows */
+			if (sk->sk_family == AF_INET)
+				tp->mptcp->mptcp_loc_nonce = mptcp_v4_get_nonce(
+							inet->inet_saddr,
+							inet->inet_daddr,
+							inet->inet_sport,
+							inet->inet_dport);
+#if IS_ENABLED(CONFIG_IPV6)
+			else
+				tp->mptcp->mptcp_loc_nonce = mptcp_v6_get_nonce(
+						inet6_sk(sk)->saddr.s6_addr32,
+						sk->sk_v6_daddr.s6_addr32,
+						inet->inet_sport,
+						inet->inet_dport);
+#endif
+		}
+	}
+#endif
+#endif
 }
 
 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
@@ -3648,6 +3962,9 @@
 {
 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+EXPORT_SYMBOL_GPL(tcp_send_ack);
+#endif
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
@@ -3660,7 +3977,11 @@
 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
 * out-of-date with SND.UNA-1 to probe window.
 */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
+#else
+int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
+#endif
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -3747,7 +4068,11 @@
 	unsigned long probe_max;
 	int err;
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
+#else
+	err = tp->ops->write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
+#endif
 
 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
 		/* Cancel probe timer, if it is not required. */