--- zzzz-none-000/linux-3.10.107/include/net/inet_connection_sock.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/net/inet_connection_sock.h	2021-02-04 17:41:59.000000000 +0000
@@ -36,14 +36,16 @@
  * (i.e. things that depend on the address family)
  */
 struct inet_connection_sock_af_ops {
-	int	    (*queue_xmit)(struct sk_buff *skb, struct flowi *fl);
+	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
 	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
 	int	    (*rebuild_header)(struct sock *sk);
 	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
 	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
-	struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
+	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
 				      struct request_sock *req,
-				      struct dst_entry *dst);
+				      struct dst_entry *dst,
+				      struct request_sock *req_unhash,
+				      bool *own_req);
 	u16	    net_header_len;
 	u16	    net_frag_header_len;
 	u16	    sockaddr_len;
@@ -98,7 +100,9 @@
 	const struct tcp_congestion_ops *icsk_ca_ops;
 	const struct inet_connection_sock_af_ops *icsk_af_ops;
 	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
-	__u8			  icsk_ca_state;
+	__u8			  icsk_ca_state:6,
+				  icsk_ca_setsockopt:1,
+				  icsk_ca_dst_locked:1;
 	__u8			  icsk_retransmits;
 	__u8			  icsk_pending;
 	__u8			  icsk_backoff;
@@ -125,10 +129,13 @@
 
 		/* Information on the current probe. */
 		int		  probe_size;
+
+		u32		  probe_timestamp;
 	} icsk_mtup;
-	u32			  icsk_ca_priv[16];
 	u32			  icsk_user_timeout;
-#define ICSK_CA_PRIV_SIZE	(16 * sizeof(u32))
+
+	u64			  icsk_ca_priv[64 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE	(8 * sizeof(u64))
 };
 
 #define ICSK_TIME_RETRANS	1	/* Retransmit timer */
@@ -147,9 +154,9 @@
 	return (void *)inet_csk(sk)->icsk_ca_priv;
 }
 
-extern struct sock *inet_csk_clone_lock(const struct sock *sk,
-					const struct request_sock *req,
-					const gfp_t priority);
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+				 const struct request_sock *req,
+				 const gfp_t priority);
 
 enum inet_csk_ack_state_t {
 	ICSK_ACK_SCHED	= 1,
@@ -158,11 +165,11 @@
 	ICSK_ACK_PUSHED2 = 8
 };
 
-extern void inet_csk_init_xmit_timers(struct sock *sk,
-				      void (*retransmit_handler)(unsigned long),
-				      void (*delack_handler)(unsigned long),
-				      void (*keepalive_handler)(unsigned long));
-extern void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_init_xmit_timers(struct sock *sk,
+			       void (*retransmit_handler)(unsigned long),
+			       void (*delack_handler)(unsigned long),
+			       void (*keepalive_handler)(unsigned long));
+void inet_csk_clear_xmit_timers(struct sock *sk);
 
 static inline void inet_csk_schedule_ack(struct sock *sk)
 {
@@ -179,8 +186,8 @@
 	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
-extern void inet_csk_delete_keepalive_timer(struct sock *sk);
-extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+void inet_csk_delete_keepalive_timer(struct sock *sk);
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 
 #ifdef INET_CSK_DEBUG
 extern const char inet_csk_timer_bug_msg[];
@@ -242,47 +249,39 @@
 #endif
 }
 
-extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+static inline unsigned long
+inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
+		     unsigned long max_when)
+{
+	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
 
-extern struct request_sock *inet_csk_search_req(const struct sock *sk,
-						struct request_sock ***prevp,
-						const __be16 rport,
-						const __be32 raddr,
-						const __be32 laddr);
-extern int inet_csk_bind_conflict(const struct sock *sk,
-				  const struct inet_bind_bucket *tb, bool relax);
-extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
+	return (unsigned long)min_t(u64, when, max_when);
+}
 
-extern struct dst_entry* inet_csk_route_req(struct sock *sk,
-					    struct flowi4 *fl4,
-					    const struct request_sock *req);
-extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk,
-						   struct sock *newsk,
-						   const struct request_sock *req);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-static inline void inet_csk_reqsk_queue_add(struct sock *sk,
-					    struct request_sock *req,
-					    struct sock *child)
-{
-	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
-}
+int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax);
+int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
-extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
-					  struct request_sock *req,
-					  unsigned long timeout);
+struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
+				     const struct request_sock *req);
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+					    struct sock *newsk,
+					    const struct request_sock *req);
 
-static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
-						struct request_sock *req)
-{
-	if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
-		inet_csk_delete_keepalive_timer(sk);
-}
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+				      struct request_sock *req,
+				      struct sock *child);
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+				   unsigned long timeout);
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+					 struct request_sock *req,
+					 bool own_req);
 
-static inline void inet_csk_reqsk_queue_added(struct sock *sk,
-					      const unsigned long timeout)
+static inline void inet_csk_reqsk_queue_added(struct sock *sk)
 {
-	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
-		inet_csk_reset_keepalive_timer(sk, timeout);
+	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
 }
 
 static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
@@ -297,32 +296,14 @@
 
 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
-}
-
-static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
-					       struct request_sock *req,
-					       struct request_sock **prev)
-{
-	reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
-}
-
-static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
-					     struct request_sock *req,
-					     struct request_sock **prev)
-{
-	inet_csk_reqsk_queue_unlink(sk, req, prev);
-	inet_csk_reqsk_queue_removed(sk, req);
-	reqsk_free(req);
+	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
 }
 
-extern void inet_csk_reqsk_queue_prune(struct sock *parent,
-				       const unsigned long interval,
-				       const unsigned long timeout,
-				       const unsigned long max_rto);
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
 
-extern void inet_csk_destroy_sock(struct sock *sk);
-extern void inet_csk_prepare_forced_close(struct sock *sk);
+void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_forced_close(struct sock *sk);
 
 /*
  * LISTEN is a special case for poll..
@@ -333,15 +314,15 @@
 			(POLLIN | POLLRDNORM) : 0;
 }
 
-extern int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
-extern void inet_csk_listen_stop(struct sock *sk);
+int inet_csk_listen_start(struct sock *sk, int backlog);
+void inet_csk_listen_stop(struct sock *sk);
 
-extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
-				      char __user *optval, int __user *optlen);
-extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
-				      char __user *optval, unsigned int optlen);
+int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen);
+int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen);
 
-extern struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 #endif /* _INET_CONNECTION_SOCK_H */
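
Note (not part of the patch): a minimal, self-contained user-space sketch of the clamping arithmetic performed by the inet_csk_rto_backoff() helper this patch introduces: the RTO is shifted left by the backoff count, widened to 64 bits to avoid overflow at large backoffs, then capped at max_when. The function name rto_backoff(), main(), and the sample values are hypothetical stand-ins; the kernel version uses min_t() and works in jiffies.

/* Illustration only: mirrors the u64 widening and cap in inet_csk_rto_backoff(). */
#include <stdint.h>
#include <stdio.h>

static unsigned long rto_backoff(unsigned long icsk_rto,
                                 unsigned int icsk_backoff,
                                 unsigned long max_when)
{
        /* Widen before shifting so a large backoff cannot overflow. */
        uint64_t when = (uint64_t)icsk_rto << icsk_backoff;

        return (unsigned long)(when < max_when ? when : max_when);
}

int main(void)
{
        unsigned long rto = 200;          /* hypothetical base RTO */
        unsigned long max_when = 120000;  /* hypothetical cap */

        for (unsigned int backoff = 0; backoff <= 12; backoff++)
                printf("backoff=%2u -> when=%lu\n",
                       backoff, rto_backoff(rto, backoff, max_when));
        return 0;
}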