--- zzzz-none-000/linux-4.1.52/include/net/tcp.h	2018-05-28 02:26:45.000000000 +0000
+++ bcm63-7530ax-731/linux-4.1.52/include/net/tcp.h	2022-03-02 11:37:13.000000000 +0000
@@ -54,6 +54,11 @@
 
 #define MAX_TCP_HEADER	(128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE	40
+#if defined(CONFIG_BCM_KF_MISC_BACKPORTS)
+/*CVE-2019-11477*/
+#define TCP_MIN_SND_MSS		48
+#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
+#endif //#if defined(CONFIG_BCM_KF_MISC_BACKPORTS)
 
 /*
  * Never offer a window over 32767 without using window scaling. Some
@@ -179,6 +184,9 @@
 #define TCPOPT_SACK		5	/* SACK Block */
 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+#define TCPOPT_MPTCP		30
+#endif
 #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 #define TCPOPT_EXP		254	/* Experimental */
 /* Magic number to be after the option value for sharing TCP
@@ -233,6 +241,29 @@
 #define	TFO_SERVER_WO_SOCKOPT1	0x400
 #define	TFO_SERVER_WO_SOCKOPT2	0x800
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+/* Flags from tcp_input.c for tcp_ack */
+#define FLAG_DATA		0x01	/* Incoming frame contained data. */
+#define FLAG_WIN_UPDATE		0x02	/* Incoming ACK was a window update. */
+#define FLAG_DATA_ACKED		0x04	/* This ACK acknowledged new data. */
+#define FLAG_RETRANS_DATA_ACKED	0x08	/* "" "" some of which was retransmitted. */
+#define FLAG_SYN_ACKED		0x10	/* This ACK acknowledged SYN. */
+#define FLAG_DATA_SACKED	0x20	/* New SACK. */
+#define FLAG_ECE		0x40	/* ECE in this ACK */
+#define FLAG_SLOWPATH		0x100	/* Do not skip RFC checks for window update.*/
+#define FLAG_ORIG_SACK_ACKED	0x200	/* Never retransmitted data are (s)acked */
+#define FLAG_SND_UNA_ADVANCED	0x400	/* Snd_una was changed (!= FLAG_DATA_ACKED) */
+#define FLAG_DSACKING_ACK	0x800	/* SACK blocks contained D-SACK info */
+#define FLAG_SACK_RENEGING	0x2000	/* snd_una advanced to a sacked seq */
+#define FLAG_UPDATE_TS_RECENT	0x4000	/* tcp_replace_ts_recent() */
+#define MPTCP_FLAG_DATA_ACKED	0x8000
+
+#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
+#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
+#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
+#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
+
+#endif
 
 extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
@@ -347,6 +378,104 @@
 #define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+/**** START - Exports needed for MPTCP ****/
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
+
+struct mptcp_options_received;
+
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited);
+void tcp_enter_quickack_mode(struct sock *sk);
+int tcp_close_state(struct sock *sk);
+void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
+			 const struct sk_buff *skb);
+int tcp_xmit_probe_skb(struct sock *sk, int urgent);
+void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb);
+int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+		     gfp_t gfp_mask);
+unsigned int tcp_mss_split_point(const struct sock *sk,
+				 const struct sk_buff *skb,
+				 unsigned int mss_now,
+				 unsigned int max_segs,
+				 int nonagle);
+bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+		    unsigned int cur_mss, int nonagle);
+bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+		      unsigned int cur_mss);
+unsigned int tcp_cwnd_test(const struct tcp_sock *tp, const struct sk_buff *skb);
+int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
+		      unsigned int mss_now);
+int __pskb_trim_head(struct sk_buff *skb, int len);
+void tcp_queue_skb(struct sock *sk, struct sk_buff *skb);
+void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags);
+void tcp_reset(struct sock *sk);
+bool tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
+			   const u32 ack_seq, const u32 nwin);
+bool tcp_urg_mode(const struct tcp_sock *tp);
+void tcp_ack_probe(struct sock *sk);
+void tcp_rearm_rto(struct sock *sk);
+int tcp_write_timeout(struct sock *sk);
+bool retransmits_timed_out(struct sock *sk, unsigned int boundary,
+			   unsigned int timeout, bool syn_set);
+void tcp_write_err(struct sock *sk);
+void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr);
+void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
+			  unsigned int mss_now);
+
+void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+			   struct request_sock *req);
+int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+		       struct flowi *fl,
+		       struct request_sock *req,
+		       u16 queue_mapping,
+		       struct tcp_fastopen_cookie *foc);
+void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb);
+void tcp_v4_reqsk_destructor(struct request_sock *req);
+
+void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+			   struct request_sock *req);
+int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+		       struct flowi *fl, struct request_sock *req,
+		       u16 queue_mapping, struct tcp_fastopen_cookie *foc);
+void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
+int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void tcp_v6_destroy_sock(struct sock *sk);
+void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
+void tcp_v6_hash(struct sock *sk);
+struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb);
+struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+				  struct request_sock *req,
+				  struct dst_entry *dst);
+void tcp_v6_reqsk_destructor(struct request_sock *req);
+
+unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+				int large_allowed);
+u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb);
+
+void skb_clone_fraglist(struct sk_buff *skb);
+void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
+
+void inet_twsk_free(struct inet_timewait_sock *tw);
+int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb);
+/* These states need RST on ABORT according to RFC793 */
+static inline bool tcp_need_reset(int state)
+{
+	return (1 << state) &
+	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
+		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
+}
+
+int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
+			       bool *fragstolen);
+bool tcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+		      struct sk_buff *from, bool *fragstolen);
+/**** END - Exports needed for MPTCP ****/
+
+#endif
 void tcp_tasklet_init(void);
 
 void tcp_v4_err(struct sk_buff *skb, u32);
@@ -440,7 +569,13 @@
 		int flags, int *addr_len);
 void tcp_parse_options(const struct sk_buff *skb,
 		       struct tcp_options_received *opt_rx,
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 		       int estab, struct tcp_fastopen_cookie *foc);
+#else
+		       struct mptcp_options_received *mopt_rx,
+		       int estab, struct tcp_fastopen_cookie *foc,
+		       struct tcp_sock *tp);
+#endif
 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 /*
@@ -449,6 +584,9 @@
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+void tcp_v6_mtu_reduced(struct sock *sk);
+#endif
 void tcp_req_err(struct sock *sk, u32 seq);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(struct sock *sk,
@@ -495,8 +633,13 @@
 
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 			      u16 *mssp);
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 __u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
 			      __u16 *mss);
+#else
+__u32 cookie_v4_init_sequence(struct request_sock *req, struct sock *sk,
+			      const struct sk_buff *skb, __u16 *mss);
+#endif
 __u32 cookie_init_timestamp(struct request_sock *req);
 bool cookie_timestamp_decode(struct tcp_options_received *opt);
 bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -509,8 +652,13 @@
 
 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 			      const struct tcphdr *th, u16 *mssp);
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
 			      __u16 *mss);
+#else
+__u32 cookie_v6_init_sequence(struct request_sock *req, struct sock *sk,
+			      const struct sk_buff *skb, __u16 *mss);
+#endif
 #endif
 
 /* tcp_output.c */
@@ -537,10 +685,22 @@
 void tcp_send_loss_probe(struct sock *sk);
 bool tcp_schedule_loss_probe(struct sock *sk);
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+u16 tcp_select_window(struct sock *sk);
+int select_size(const struct sock *sk, bool sg);
+bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+		    int push_one, gfp_t gfp);
+
+#endif
 
 /* tcp_input.c */
 void tcp_resume_early_retransmit(struct sock *sk);
 void tcp_rearm_rto(struct sock *sk);
 void tcp_reset(struct sock *sk);
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+void tcp_set_rto(struct sock *sk);
+bool tcp_should_expand_sndbuf(const struct sock *sk);
+bool tcp_prune_ofo_queue(struct sock *sk);
+#endif
 
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
@@ -711,6 +871,14 @@
 		__u32		tcp_tw_isn;
 		__u32		tcp_gso_segs;
 	};
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+
+#ifdef CONFIG_MPTCP
+	__u8		mptcp_flags;	/* flags for the MPTCP layer */
+	__u8		dss_off;	/* Number of 4-byte words until
+					 * seq-number */
+#endif
+#endif
 	__u8		tcp_flags;	/* TCP header flags. (tcp[13]) */
 
 	__u8		sacked;		/* State flags for SACK/FACK.	*/
@@ -726,12 +894,31 @@
 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 	/* 1 byte hole */
 	__u32		ack_seq;	/* Sequence number ACK'd	*/
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+	union {
+		union {
+			struct inet_skb_parm	h4;
+#if IS_ENABLED(CONFIG_IPV6)
+			struct inet6_skb_parm	h6;
+#endif
+		} header;	/* For incoming frames */
+#ifdef CONFIG_MPTCP
+		union {			/* For MPTCP outgoing frames */
+			__u32 path_mask; /* paths that tried to send this skb */
+			__u32 dss[6];	/* DSS options */
+		};
+	};
+#endif
+
+#else
 	union {
 		struct inet_skb_parm	h4;
 #if IS_ENABLED(CONFIG_IPV6)
 		struct inet6_skb_parm	h6;
 #endif
 	} header;	/* For incoming frames */
+
+#endif
 };
 
 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
@@ -997,7 +1184,7 @@
 /* The maximum number of MSS of available cwnd for which TSO defers
  * sending if not using sysctl_tcp_tso_win_divisor.
  */
-static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
+static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp __maybe_unused)
 {
 	return 3;
 }
@@ -1119,7 +1306,12 @@
 /* Determine a window scaling and initial window to offer. */
 void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
 			       __u32 *window_clamp, int wscale_ok,
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
+#else
+			       __u8 *rcv_wscale, __u32 init_rcv_wnd,
+			       const struct sock *sk);
+#endif
 
 static inline int tcp_win_from_space(int space)
 {
@@ -1130,6 +1322,21 @@
 		space - (space>>tcp_adv_win_scale);
 }
 
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+#ifdef CONFIG_MPTCP
+extern struct static_key mptcp_static_key;
+static inline bool mptcp(const struct tcp_sock *tp)
+{
+	return static_key_false(&mptcp_static_key) && tp->mpc;
+}
+#else
+static inline bool mptcp(const struct tcp_sock *tp)
+{
+	return 0;
+}
+#endif
+
+#endif
 /* Note: caller must be prepared to deal with negative returns */
 static inline int tcp_space(const struct sock *sk)
 {
@@ -1149,12 +1356,12 @@
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
-	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
+	return (int)tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
 }
 
 static inline int keepalive_time_when(const struct tcp_sock *tp)
 {
-	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
+	return (int)tp->keepalive_time ? : sysctl_tcp_keepalive_time;
 }
 
 static inline int keepalive_probes(const struct tcp_sock *tp)
@@ -1186,7 +1393,7 @@
 {
 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
 		return true;
-	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
+	if (unlikely((long)get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
 		return true;
 	/*
 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1216,7 +1423,7 @@
 
 	   However, we can relax time bounds for RST segments to MSL.
 	 */
-	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+	if (rst && (long)get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
 		return false;
 	return true;
 }
@@ -1316,9 +1523,9 @@
 			 int family);
 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
 #else
-static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
-						const union tcp_md5_addr *addr,
-						int family)
+static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk __maybe_unused,
+						const union tcp_md5_addr *addr __maybe_unused,
+						int family __maybe_unused)
 {
 	return NULL;
 }
@@ -1633,6 +1840,33 @@
 #endif
 };
 
+#if defined(CONFIG_BCM_MPTCP) && defined(CONFIG_BCM_KF_MPTCP)
+/* TCP/MPTCP-specific functions */
+struct tcp_sock_ops {
+	u32 (*__select_window)(struct sock *sk);
+	u16 (*select_window)(struct sock *sk);
+	void (*select_initial_window)(int __space, __u32 mss, __u32 *rcv_wnd,
+				      __u32 *window_clamp, int wscale_ok,
+				      __u8 *rcv_wscale, __u32 init_rcv_wnd,
+				      const struct sock *sk);
+	int (*select_size)(const struct sock *sk, bool sg);
+	void (*init_buffer_space)(struct sock *sk);
+	void (*set_rto)(struct sock *sk);
+	bool (*should_expand_sndbuf)(const struct sock *sk);
+	void (*send_fin)(struct sock *sk);
+	bool (*write_xmit)(struct sock *sk, unsigned int mss_now, int nonagle,
+			   int push_one, gfp_t gfp);
+	void (*send_active_reset)(struct sock *sk, gfp_t priority);
+	int (*write_wakeup)(struct sock *sk);
+	bool (*prune_ofo_queue)(struct sock *sk);
+	void (*retransmit_timer)(struct sock *sk);
+	void (*time_wait)(struct sock *sk, int state, int timeo);
+	void (*cleanup_rbuf)(struct sock *sk, int copied);
+	void (*cwnd_validate)(struct sock *sk, bool is_cwnd_limited);
+};
+extern const struct tcp_sock_ops tcp_specific;
+
+#endif
 struct tcp_request_sock_ops {
 	u16 mss_clamp;
 #ifdef CONFIG_TCP_MD5SIG
@@ -1643,11 +1877,21 @@
 						  const struct sock *sk,
 						  const struct sk_buff *skb);
 #endif
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 	void (*init_req)(struct request_sock *req, struct sock *sk,
 			 struct sk_buff *skb);
+#else
	int (*init_req)(struct request_sock *req, struct sock *sk_listener,
+			struct sk_buff *skb, bool want_cookie);
+#endif
 #ifdef CONFIG_SYN_COOKIES
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 	__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
 				 __u16 *mss);
+#else
+	__u32 (*cookie_init_seq)(struct request_sock *req, struct sock *sk,
+				 const struct sk_buff *skb, __u16 *mss);
+#endif
 #endif
 	struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
 				       const struct request_sock *req,
@@ -1661,14 +1905,28 @@
 };
 
 #ifdef CONFIG_SYN_COOKIES
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+#else
+static inline __u32 cookie_init_sequence(struct request_sock *req,
+					 const struct tcp_request_sock_ops *ops,
+#endif
 					 struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 	return ops->cookie_init_seq(sk, skb, mss);
+#else
+	return ops->cookie_init_seq(req, sk, skb, mss);
+#endif
 }
 #else
+#if !defined(CONFIG_BCM_MPTCP) || !defined(CONFIG_BCM_KF_MPTCP)
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+#else
+static inline __u32 cookie_init_sequence(struct request_sock *req,
+					 const struct tcp_request_sock_ops *ops,
+#endif
 					 struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {