--- zzzz-none-000/linux-4.19.183/net/ipv4/tcp_ipv4.c	2021-03-24 10:07:39.000000000 +0000
+++ bcm63-7530ax-756/linux-4.19.183/net/ipv4/tcp_ipv4.c	2023-06-28 08:54:21.000000000 +0000
@@ -67,6 +67,10 @@
 #include
 #include
 #include
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#include <net/mptcp.h>
+#include <net/mptcp_v4.h>
+#endif
 #include
 #include
 #include
@@ -87,6 +91,8 @@
 
 #include
 
+#include
+
 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
@@ -435,7 +441,11 @@
 	struct inet_sock *inet;
 	const int type = icmp_hdr(icmp_skb)->type;
 	const int code = icmp_hdr(icmp_skb)->code;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	struct sock *sk;
+#else
	struct sock *sk, *meta_sk;
+#endif
 	struct sk_buff *skb;
 	struct request_sock *fastopen;
 	u32 seq, snd_una;
@@ -464,13 +474,27 @@
 				      (code == ICMP_NET_UNREACH ||
 				       code == ICMP_HOST_UNREACH)));
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_lock_sock(sk);
+#else
+	tp = tcp_sk(sk);
+	if (mptcp(tp))
+		meta_sk = mptcp_meta_sk(sk);
+	else
+		meta_sk = sk;
+
+	bh_lock_sock(meta_sk);
+#endif
 	/* If too many ICMPs get dropped on busy
 	 * servers this needs to be solved differently.
 	 * We do take care of PMTU discovery (RFC1191) special case :
 	 * we can receive locally generated ICMP messages while socket is held.
 	 */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (sock_owned_by_user(sk)) {
+#else
+	if (sock_owned_by_user(meta_sk)) {
+#endif
 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
 			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	}
@@ -483,7 +507,9 @@
 	}
 
 	icsk = inet_csk(sk);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	tp = tcp_sk(sk);
+#endif
 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 	fastopen = tp->fastopen_rsk;
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -517,11 +543,19 @@
 			goto out;
 
 		tp->mtu_info = info;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		if (!sock_owned_by_user(sk)) {
+#else
+		if (!sock_owned_by_user(meta_sk)) {
+#endif
 			tcp_v4_mtu_reduced(sk);
 		} else {
 			if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
 				sock_hold(sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+			if (mptcp(tp))
+				mptcp_tsq_flags(sk);
+#endif
 		}
 		goto out;
 	}
@@ -535,7 +569,11 @@
 		    !icsk->icsk_backoff || fastopen)
 			break;
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		if (sock_owned_by_user(sk))
+#else
+		if (sock_owned_by_user(meta_sk))
+#endif
 			break;
 
 		skb = tcp_rtx_queue_head(sk);
@@ -558,7 +596,11 @@
 		} else {
 			/* RTO revert clocked out retransmission.
 			 * Will retransmit now */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_retransmit_timer(sk);
+#else
+			tcp_sk(sk)->ops->retransmit_timer(sk);
+#endif
 		}
 
 		break;
@@ -578,7 +620,11 @@
 		if (fastopen && !fastopen->sk)
 			break;
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		if (!sock_owned_by_user(sk)) {
+#else
+		if (!sock_owned_by_user(meta_sk)) {
+#endif
 			sk->sk_err = err;
 
 			sk->sk_error_report(sk);
@@ -607,7 +653,11 @@
 	 */
 
 	inet = inet_sk(sk);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (!sock_owned_by_user(sk) && inet->recverr) {
+#else
+	if (!sock_owned_by_user(meta_sk) && inet->recverr) {
+#endif
 		sk->sk_err = err;
 		sk->sk_error_report(sk);
 	} else	{ /* Only an error on timeout */
@@ -615,7 +665,11 @@
 	}
 
 out:
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_unlock_sock(sk);
+#else
+	bh_unlock_sock(meta_sk);
+#endif
 	sock_put(sk);
 }
 
@@ -650,7 +704,11 @@
  *	Exception: precedence violation. We do not implement it in any case.
  */
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+#else
+void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+#endif
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
@@ -796,10 +854,18 @@
  */
 
 static void tcp_v4_send_ack(const struct sock *sk,
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			    struct sk_buff *skb, u32 seq, u32 ack,
+#else
+			    struct sk_buff *skb, u32 seq, u32 ack, u32 data_ack,
+#endif
 			    u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key,
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			    int reply_flags, u8 tos)
+#else
+			    int reply_flags, u8 tos, int mptcp)
+#endif
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
@@ -808,6 +874,12 @@
 #ifdef CONFIG_TCP_MD5SIG
 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
 #endif
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+			   + ((MPTCP_SUB_LEN_DSS >> 2) +
+			      (MPTCP_SUB_LEN_ACK >> 2))
+#endif
+#endif
 			];
 	} rep;
 	struct net *net = sock_net(sk);
@@ -853,6 +925,23 @@
 				    ip_hdr(skb)->daddr, &rep.th);
 	}
 #endif
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	if (mptcp) {
+		int offset = (tsecr) ? 3 : 0;
+		/* Construction of 32-bit data_ack */
+		rep.opt[offset++] = htonl((TCPOPT_MPTCP << 24) |
+					  ((MPTCP_SUB_LEN_DSS + MPTCP_SUB_LEN_ACK) << 16) |
+					  (0x20 << 8) |
+					  (0x01));
+		rep.opt[offset] = htonl(data_ack);
+
+		arg.iov[0].iov_len += MPTCP_SUB_LEN_DSS + MPTCP_SUB_LEN_ACK;
+		rep.th.doff = arg.iov[0].iov_len / 4;
+	}
+#endif /* CONFIG_MPTCP */
+
+#endif
 	arg.flags = reply_flags;
 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 				      ip_hdr(skb)->saddr, /* XXX */
@@ -881,28 +970,55 @@
 {
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	u32 data_ack = 0;
+	int mptcp = 0;
+
+	if (tcptw->mptcp_tw) {
+		data_ack = (u32)tcptw->mptcp_tw->rcv_nxt;
+		mptcp = 1;
+	}
+#endif
 
 	tcp_v4_send_ack(sk, skb,
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+#else
+			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, data_ack,
+#endif
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
 			tcp_twsk_md5_key(tcptw),
 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tw->tw_tos
+#else
+			tw->tw_tos, mptcp
+#endif
 			);
 
 	inet_twsk_put(tw);
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
+#else
+void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+			   struct request_sock *req)
+#endif
 {
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 	 */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+#else
+	u32 seq = (sk->sk_state == TCP_LISTEN || is_meta_sk(sk)) ?
+		  tcp_rsk(req)->snt_isn + 1 :
+#endif
 					     tcp_sk(sk)->snd_nxt;
 
 	/* RFC 7323 2.3
@@ -911,7 +1027,11 @@
 	 * The window field (SEG.WND) of every outgoing segment, with the
 	 * exception of <SYN> segments, MUST be right-shifted by
 	 * Rcv.Wind.Shift bits:
 	 */
 	tcp_v4_send_ack(sk, skb, seq,
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_rsk(req)->rcv_nxt,
+#else
+			tcp_rsk(req)->rcv_nxt, 0,
+#endif
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			req->ts_recent,
@@ -919,7 +1039,11 @@
 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
 					  AF_INET),
 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			ip_hdr(skb)->tos);
+#else
+			ip_hdr(skb)->tos, 0);
+#endif
 }
 
 /*
@@ -927,11 +1051,19 @@
  *	Send a SYN-ACK after having received a SYN.
  *	This still operates on a request_sock only, not on a big
  *	socket.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 			      struct flowi *fl,
 			      struct request_sock *req,
 			      struct tcp_fastopen_cookie *foc,
 			      enum tcp_synack_type synack_type)
+#else
+int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
+		       struct flowi *fl,
+		       struct request_sock *req,
+		       struct tcp_fastopen_cookie *foc,
+		       enum tcp_synack_type synack_type)
+#endif
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -961,7 +1093,11 @@
 /*
  *	IPv4 request_sock destructor.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_v4_reqsk_destructor(struct request_sock *req)
+#else
+void tcp_v4_reqsk_destructor(struct request_sock *req)
+#endif
 {
 	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
 }
@@ -1343,9 +1479,16 @@
 	return false;
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_v4_init_req(struct request_sock *req,
 			    const struct sock *sk_listener,
 			    struct sk_buff *skb)
+#else
+static int tcp_v4_init_req(struct request_sock *req,
+			   const struct sock *sk_listener,
+			   struct sk_buff *skb,
+			   bool want_cookie)
+#endif
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct net *net = sock_net(sk_listener);
@@ -1353,6 +1496,10 @@
 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
 	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+	return 0;
+#endif
 }
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1372,7 +1519,11 @@
 	.syn_ack_timeout =	tcp_syn_ack_timeout,
 };
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+#else
+const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+#endif
 	.mss_clamp	=	TCP_MSS_DEFAULT,
 #ifdef CONFIG_TCP_MD5SIG
 	.req_md5_lookup	=	tcp_v4_md5_lookup,
@@ -1509,7 +1660,11 @@
 }
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
+#else
+struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
+#endif
 {
 #ifdef CONFIG_SYN_COOKIES
 	const struct tcphdr *th = tcp_hdr(skb);
@@ -1532,6 +1687,11 @@
 {
 	struct sock *rsk;
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (is_meta_sk(sk))
+		return mptcp_v4_do_rcv(sk, skb);
+
+#endif
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		struct dst_entry *dst = sk->sk_rx_dst;
 
@@ -1683,6 +1843,12 @@
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 				    skb->len - th->doff * 4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	TCP_SKB_CB(skb)->mptcp_flags = 0;
+	TCP_SKB_CB(skb)->dss_off = 0;
+#endif
+#endif
 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
@@ -1691,6 +1857,9 @@
 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
 }
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_LOCALIN_TCP)
+extern int bcm_tcp_v4_blog_emit(struct sk_buff *skb, struct sock *sk);
+#endif
 /*
  *	From tcp_input.c
  */
@@ -1701,8 +1870,13 @@
 	int sdif = inet_sdif(skb);
 	const struct iphdr *iph;
 	const struct tcphdr *th;
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	struct sock *sk, *meta_sk = NULL;
+#endif
 	bool refcounted;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	struct sock *sk;
+#endif
 	int ret;
 
 	if (skb->pkt_type != PACKET_HOST)
@@ -1738,9 +1912,16 @@
 		goto no_tcp_socket;
 
 process:
+	avm_pa_add_local_session(AVM_PA_NET_IP_DEVINFO(net), skb, sk);
+
 	if (sk->sk_state == TCP_TIME_WAIT)
 		goto do_time_wait;
 
+#if defined(CONFIG_BCM_KF_BLOG) && defined(CONFIG_BLOG_LOCALIN_TCP)
+	/*learn local terminated tcp traffic */
+	bcm_tcp_v4_blog_emit(skb, sk);
+#endif
+
 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 		struct request_sock *req = inet_reqsk(sk);
 		bool req_stolen = false;
@@ -1756,7 +1937,15 @@
 			reqsk_put(req);
 			goto csum_error;
 		}
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
+#else
+		if (unlikely(sk->sk_state != TCP_LISTEN && !is_meta_sk(sk))) {
+			inet_csk_reqsk_queue_drop_and_put(sk, req);
+			goto lookup;
+		}
+		if (unlikely(is_meta_sk(sk) && !mptcp_can_new_subflow(sk))) {
+#endif
 			inet_csk_reqsk_queue_drop_and_put(sk, req);
 			goto lookup;
 		}
@@ -1765,6 +1954,9 @@
 		 */
 		sock_hold(sk);
 		refcounted = true;
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+#endif
 		nsk = NULL;
 		if (!tcp_filter(sk, skb)) {
 			th = (const struct tcphdr *)skb->data;
@@ -1825,15 +2017,40 @@
 
 	sk_incoming_cpu_update(sk);
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_lock_sock_nested(sk);
+#else
+	if (mptcp(tcp_sk(sk))) {
+		meta_sk = mptcp_meta_sk(sk);
+
+		bh_lock_sock_nested(meta_sk);
+		if (sock_owned_by_user(meta_sk))
+			mptcp_prepare_for_backlog(sk, skb);
+	} else {
+		meta_sk = sk;
+		bh_lock_sock_nested(sk);
+	}
+#endif
 	tcp_segs_in(tcp_sk(sk), skb);
 	ret = 0;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (!sock_owned_by_user(sk)) {
+#else
	if (!sock_owned_by_user(meta_sk)) {
+#endif
 		ret = tcp_v4_do_rcv(sk, skb);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	} else if (tcp_add_backlog(sk, skb)) {
+#else
+	} else if (tcp_add_backlog(meta_sk, skb)) {
+#endif
 		goto discard_and_relse;
 	}
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_unlock_sock(sk);
+#else
+	bh_unlock_sock(meta_sk);
+#endif
 
 put_and_return:
 	if (refcounted)
@@ -1847,6 +2064,21 @@
 
 	tcp_v4_fill_cb(skb, iph, th);
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	if (!sk && th->syn && !th->ack) {
+		int ret = mptcp_lookup_join(skb, NULL);
+
+		if (ret < 0) {
+			tcp_v4_send_reset(NULL, skb);
+			goto discard_it;
+		} else if (ret > 0) {
+			return 0;
+		}
+	}
+#endif
+
+#endif
 	if (tcp_checksum_complete(skb)) {
 csum_error:
 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@@ -1895,6 +2127,20 @@
 			refcounted = false;
 			goto process;
 		}
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+		if (th->syn && !th->ack) {
+			int ret = mptcp_lookup_join(skb, inet_twsk(sk));
+
+			if (ret < 0) {
+				tcp_v4_send_reset(NULL, skb);
+				goto discard_it;
+			} else if (ret > 0) {
+				return 0;
+			}
+		}
+#endif
+#endif
 	}
 		/* to ACK */
 		/* fall through */
@@ -1964,7 +2210,16 @@
 
 	tcp_init_sock(sk);
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	icsk->icsk_af_ops = &ipv4_specific;
+#else
+#ifdef CONFIG_MPTCP
+	if (sock_flag(sk, SOCK_MPTCP))
+		icsk->icsk_af_ops = &mptcp_v4_specific;
+	else
+#endif
+		icsk->icsk_af_ops = &ipv4_specific;
+#endif
 
 #ifdef CONFIG_TCP_MD5SIG
 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
@@ -1983,6 +2238,13 @@
 
 	tcp_cleanup_congestion_control(sk);
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (mptcp(tp))
+		mptcp_destroy_sock(sk);
+	if (tp->inside_tk_table)
+		mptcp_hash_remove_bh(tp);
+
+#endif
 	tcp_cleanup_ulp(sk);
 
 	/* Cleanup up the write buffer. */
@@ -2487,6 +2749,13 @@
 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
 	.max_header		= MAX_TCP_HEADER,
 	.obj_size		= sizeof(struct tcp_sock),
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	.useroffset		= offsetof(struct tcp_sock, mptcp_sched_name),
+	.usersize		= sizeof_field(struct tcp_sock, mptcp_sched_name) +
+				  sizeof_field(struct tcp_sock, mptcp_pm_name),
+#endif
+#endif
 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
 	.twsk_prot		= &tcp_timewait_sock_ops,
 	.rsk_prot		= &tcp_request_sock_ops,
@@ -2497,6 +2766,11 @@
 	.compat_getsockopt	= compat_tcp_getsockopt,
 #endif
 	.diag_destroy		= tcp_abort,
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	.clear_sk		= mptcp_clear_sk,
+#endif
+#endif
 };
 EXPORT_SYMBOL(tcp_prot);