--- zzzz-none-000/linux-4.19.183/include/net/tcp.h	2021-03-24 10:07:39.000000000 +0000
+++ bcm63-7530ax-756/linux-4.19.183/include/net/tcp.h	2023-06-28 08:54:20.000000000 +0000
@@ -185,6 +185,9 @@
 #define TCPOPT_SACK		5	/* SACK Block */
 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#define TCPOPT_MPTCP		30
+#endif
 #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 #define TCPOPT_EXP		254	/* Experimental */
 /* Magic number to be after the option value for sharing TCP
@@ -241,6 +244,33 @@
  */
 #define TFO_SERVER_WO_SOCKOPT1	0x400
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+/* Flags from tcp_input.c for tcp_ack */
+#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
+#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
+#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
+#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
+#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
+#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
+#define FLAG_ECE		0x40 /* ECE in this ACK				*/
+#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
+#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
+#define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
+#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
+#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
+#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
+#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
+#define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack() */
+#define FLAG_ACK_MAYBE_DELAYED	0x10000 /* Likely a delayed ACK */
+
+#define MPTCP_FLAG_DATA_ACKED	0x20000
+
+#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
+#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
+#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
+#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
+#endif
 
 /* sysctl variables for tcp */
 extern int sysctl_tcp_max_orphans;
@@ -313,6 +343,98 @@
 #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+/**** START - Exports needed for MPTCP ****/
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
+
+struct mptcp_options_received;
+
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited);
+int tcp_close_state(struct sock *sk);
+void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
+			 const struct sk_buff *skb);
+int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib);
+void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb);
+int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+		     gfp_t gfp_mask);
+unsigned int tcp_mss_split_point(const struct sock *sk,
+				 const struct sk_buff *skb,
+				 unsigned int mss_now,
+				 unsigned int max_segs,
+				 int nonagle);
+bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+		    unsigned int cur_mss, int nonagle);
+bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+		      unsigned int cur_mss);
+unsigned int tcp_cwnd_test(const struct tcp_sock *tp, const struct sk_buff *skb);
+int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now);
+int __pskb_trim_head(struct sk_buff *skb, int len);
+void tcp_queue_skb(struct sock *sk, struct sk_buff *skb);
+void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags);
+void tcp_reset(struct sock *sk);
+bool tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
+			   const u32 ack_seq, const u32 nwin);
+bool tcp_urg_mode(const struct tcp_sock *tp);
+void tcp_ack_probe(struct sock *sk);
+void tcp_rearm_rto(struct sock *sk);
+int tcp_write_timeout(struct sock *sk);
+bool retransmits_timed_out(struct sock *sk,
+			   unsigned int boundary,
+			   unsigned int timeout);
+void tcp_write_err(struct sock *sk);
+void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr);
+void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb);
+void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now);
+
+void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+			   struct request_sock *req);
+void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb);
+void tcp_v4_reqsk_destructor(struct request_sock *req);
+
+void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+			   struct request_sock *req);
+void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb);
+int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void tcp_v6_destroy_sock(struct sock *sk);
+void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
+void tcp_v6_hash(struct sock *sk);
+struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+				  struct request_sock *req,
+				  struct dst_entry *dst,
+				  struct request_sock *req_unhash,
+				  bool *own_req);
+void tcp_v6_reqsk_destructor(struct request_sock *req);
+
+unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+				int large_allowed);
+u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb);
+
+void skb_clone_fraglist(struct sk_buff *skb);
+
+void inet_twsk_free(struct inet_timewait_sock *tw);
+int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb);
+/* These states need RST on ABORT according to RFC793 */
+static inline bool tcp_need_reset(int state)
+{
+	return (1 << state) &
+	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
+		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
+}
+
+int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
+			       bool *fragstolen);
+void tcp_ofo_queue(struct sock *sk);
+void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb);
+int linear_payload_sz(bool first_skb);
+/**** END - Exports needed for MPTCP ****/
+
+#endif
 void tcp_tasklet_init(void);
 
 void tcp_v4_err(struct sk_buff *skb, u32);
@@ -412,7 +534,13 @@
 			struct vm_area_struct *vma);
 void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
 		       struct tcp_options_received *opt_rx,
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		       int estab, struct tcp_fastopen_cookie *foc);
+#else
+		       struct mptcp_options_received *mopt_rx,
+		       int estab, struct tcp_fastopen_cookie *foc,
+		       struct tcp_sock *tp);
+#endif
 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 /*
@@ -421,6 +549,9 @@
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+void tcp_v6_mtu_reduced(struct sock *sk);
+#endif
 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
@@ -538,7 +669,12 @@
 
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 			      u16 *mssp);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
+#else
+__u32 cookie_v4_init_sequence(struct request_sock *req, const struct sock *sk,
+			      const struct sk_buff *skb, __u16 *mss);
+#endif
 u64 cookie_init_timestamp(struct request_sock *req);
 bool cookie_timestamp_decode(const struct net *net,
 			     struct tcp_options_received *opt);
@@ -552,7 +688,12 @@
 
 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 			      const struct tcphdr *th, u16 *mssp);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
+#else
+__u32 cookie_v6_init_sequence(struct request_sock *req, const struct sock *sk,
+			      const struct sk_buff *skb, __u16 *mss);
+#endif
 #endif
 
 /* tcp_output.c */
@@ -588,10 +729,20 @@
 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
 			     const struct sk_buff *next_skb);
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+u16 tcp_select_window(struct sock *sk);
+bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+		    int push_one, gfp_t gfp);
+
+#endif
 /* tcp_input.c */
 void tcp_rearm_rto(struct sock *sk);
 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+void tcp_set_rto(struct sock *sk);
+bool tcp_should_expand_sndbuf(const struct sock *sk);
+#endif
 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 void tcp_fin(struct sock *sk);
 
@@ -635,7 +786,11 @@
 }
 
 /* tcp.c */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 void tcp_get_info(struct sock *, struct tcp_info *);
+#else
+void tcp_get_info(struct sock *, struct tcp_info *, bool no_lock);
+#endif
 
 /* Read 'sendfile()'-style from a TCP socket */
 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
@@ -823,6 +978,14 @@
 			u16	tcp_gso_size;
 		};
 	};
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+#ifdef CONFIG_MPTCP
+	__u8	mptcp_flags;	/* flags for the MPTCP layer */
+	__u8	dss_off;	/* Number of 4-byte words until
+				 * seq-number */
+#endif
+#endif
 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 
 	__u8		sacked;		/* State flags for SACK.	*/
@@ -841,6 +1004,16 @@
 			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
 			unused:5;
 	__u32		ack_seq;	/* Sequence number ACK'd	*/
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+#ifdef CONFIG_MPTCP
+	union {			/* For MPTCP outgoing frames */
+		__u32 path_mask; /* paths that tried to send this skb */
+		__u32 dss[6];	/* DSS options */
+	};
+#endif
+
+#endif
 	union {
 		struct {
 			/* There is space for up to 24 bytes */
@@ -1361,6 +1534,21 @@
 		space - (space>>tcp_adv_win_scale);
 }
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+extern struct static_key mptcp_static_key;
+static inline bool mptcp(const struct tcp_sock *tp)
+{
+	return static_key_false(&mptcp_static_key) && tp->mpc;
+}
+#else
+static inline bool mptcp(const struct tcp_sock *tp)
+{
+	return 0;
+}
+#endif
+
+#endif
 /* Note: caller must be prepared to deal with negative returns */
 static inline int tcp_space(const struct sock *sk)
 {
@@ -1911,6 +2099,32 @@
 #endif
 };
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+/* TCP/MPTCP-specific functions */
+struct tcp_sock_ops {
+	u32 (*__select_window)(struct sock *sk);
+	u16 (*select_window)(struct sock *sk);
+	void (*select_initial_window)(const struct sock *sk, int __space,
+				      __u32 mss, __u32 *rcv_wnd,
+				      __u32 *window_clamp, int wscale_ok,
+				      __u8 *rcv_wscale, __u32 init_rcv_wnd);
+	int (*select_size)(const struct sock *sk, bool first_skb, bool zc);
+	void (*init_buffer_space)(struct sock *sk);
+	void (*set_rto)(struct sock *sk);
+	bool (*should_expand_sndbuf)(const struct sock *sk);
+	void (*send_fin)(struct sock *sk);
+	bool (*write_xmit)(struct sock *sk, unsigned int mss_now, int nonagle,
+			   int push_one, gfp_t gfp);
+	void (*send_active_reset)(struct sock *sk, gfp_t priority);
+	int (*write_wakeup)(struct sock *sk, int mib);
+	void (*retransmit_timer)(struct sock *sk);
+	void (*time_wait)(struct sock *sk, int state, int timeo);
+	void (*cleanup_rbuf)(struct sock *sk, int copied);
+	void (*cwnd_validate)(struct sock *sk, bool is_cwnd_limited);
+};
+extern const struct tcp_sock_ops tcp_specific;
+
+#endif
 struct tcp_request_sock_ops {
 	u16 mss_clamp;
 #ifdef CONFIG_TCP_MD5SIG
@@ -1921,12 +2135,24 @@
 						  const struct sock *sk,
 						  const struct sk_buff *skb);
 #endif
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	void (*init_req)(struct request_sock *req,
 			 const struct sock *sk_listener,
 			 struct sk_buff *skb);
+#else
+	int (*init_req)(struct request_sock *req,
+			const struct sock *sk_listener,
+			struct sk_buff *skb,
+			bool want_cookie);
+#endif
 #ifdef CONFIG_SYN_COOKIES
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
 				 __u16 *mss);
+#else
+	__u32 (*cookie_init_seq)(struct request_sock *req, const struct sock *sk,
+				 const struct sk_buff *skb, __u16 *mss);
+#endif
 #endif
 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
 				       const struct request_sock *req);
@@ -1940,15 +2166,25 @@
 
 #ifdef CONFIG_SYN_COOKIES
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+					 struct request_sock *req,
+#endif
 					 const struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
 	tcp_synq_overflow(sk);
 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	return ops->cookie_init_seq(skb, mss);
+#else
+	return ops->cookie_init_seq(req, sk, skb, mss);
+#endif
 }
 #else
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+					 struct request_sock *req,
+#endif
 					 const struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
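
The tcp_sock_ops table added in the @@ -1911,6 +2099,32 @@ hunk is the central indirection of this patch: plain TCP installs the exported tcp_specific table, MPTCP installs its own, and shared call sites dispatch through the socket's ops pointer instead of sprinkling #ifdef CONFIG_MPTCP through the hot paths. The following is a minimal user-space sketch of that dispatch pattern, not kernel code; my_sock, plain_ops and mptcp_ops are hypothetical stand-ins for the kernel symbols.

#include <stdbool.h>
#include <stdio.h>

struct my_sock;

/* Per-flavour operations, mirroring the shape of struct tcp_sock_ops. */
struct my_sock_ops {
	bool (*should_expand_sndbuf)(const struct my_sock *sk);
	void (*cleanup_rbuf)(struct my_sock *sk, int copied);
};

struct my_sock {
	const struct my_sock_ops *ops;
	bool mpc;	/* set once MPTCP has been negotiated */
};

static bool tcp_should_expand(const struct my_sock *sk)
{
	(void)sk;
	puts("plain TCP sndbuf check");
	return true;
}

static void tcp_cleanup(struct my_sock *sk, int copied)
{
	(void)sk;
	printf("cleanup_rbuf, %d bytes copied\n", copied);
}

static bool mptcp_should_expand(const struct my_sock *sk)
{
	(void)sk;
	puts("MPTCP aggregate sndbuf check across subflows");
	return true;
}

/* Plain TCP table: the analogue of the exported 'tcp_specific'. */
static const struct my_sock_ops plain_ops = {
	.should_expand_sndbuf	= tcp_should_expand,
	.cleanup_rbuf		= tcp_cleanup,
};

/* MPTCP table: overrides some hooks, reuses the TCP default for others. */
static const struct my_sock_ops mptcp_ops = {
	.should_expand_sndbuf	= mptcp_should_expand,
	.cleanup_rbuf		= tcp_cleanup,
};

int main(void)
{
	struct my_sock sk = { .ops = &plain_ops, .mpc = false };

	/* Shared call site: no #ifdef, just indirect dispatch. */
	sk.ops->should_expand_sndbuf(&sk);

	/* After a successful MP_CAPABLE handshake the table is swapped. */
	sk.ops = &mptcp_ops;
	sk.mpc = true;
	sk.ops->should_expand_sndbuf(&sk);
	sk.ops->cleanup_rbuf(&sk, 128);
	return 0;
}

The mptcp() helper in the @@ -1361,6 +1534,21 @@ hunk plays the complementary fast-path role: static_key_false() compiles to a patchable no-op branch, so on systems that never create an MPTCP socket the per-packet "is this MPTCP?" test costs essentially nothing.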