--- zzzz-none-000/linux-3.10.107/include/net/request_sock.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/net/request_sock.h	2021-02-04 17:41:59.000000000 +0000
@@ -32,77 +32,100 @@
 	int		obj_size;
 	struct kmem_cache	*slab;
 	char		*slab_name;
-	int		(*rtx_syn_ack)(struct sock *sk,
+	int		(*rtx_syn_ack)(const struct sock *sk,
 					       struct request_sock *req);
-	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
+	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
 				    struct request_sock *req);
-	void		(*send_reset)(struct sock *sk,
+	void		(*send_reset)(const struct sock *sk,
 				      struct sk_buff *skb);
 	void		(*destructor)(struct request_sock *req);
-	void		(*syn_ack_timeout)(struct sock *sk,
-					   struct request_sock *req);
+	void		(*syn_ack_timeout)(const struct request_sock *req);
 };
 
-extern int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
 
 /* struct request_sock - mini sock to represent a connection request
  */
 struct request_sock {
+	struct sock_common		__req_common;
+#define rsk_refcnt			__req_common.skc_refcnt
+#define rsk_hash			__req_common.skc_hash
+#define rsk_listener			__req_common.skc_listener
+#define rsk_window_clamp		__req_common.skc_window_clamp
+#define rsk_rcv_wnd			__req_common.skc_rcv_wnd
+
 	struct request_sock		*dl_next;
 	u16				mss;
 	u8				num_retrans; /* number of retransmits */
 	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
 	u8				num_timeout:7; /* number of timeouts */
-	/* The following two fields can be easily recomputed I think -AK */
-	u32				window_clamp; /* window clamp at creation time */
-	u32				rcv_wnd;	  /* rcv_wnd offered first time */
 	u32				ts_recent;
-	unsigned long			expires;
+	struct timer_list		rsk_timer;
 	const struct request_sock_ops	*rsk_ops;
 	struct sock			*sk;
+	u32				*saved_syn;
 	u32				secid;
 	u32				peer_secid;
 };
 
-static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
+static inline struct request_sock *inet_reqsk(struct sock *sk)
 {
-	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
+	return (struct request_sock *)sk;
+}
 
-	if (req != NULL)
-		req->rsk_ops = ops;
+static inline struct sock *req_to_sk(struct request_sock *req)
+{
+	return (struct sock *)req;
+}
 
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
+	    bool attach_listener)
+{
+	struct request_sock *req;
+
+	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
+
+	if (req) {
+		req->rsk_ops = ops;
+		if (attach_listener) {
+			sock_hold(sk_listener);
+			req->rsk_listener = sk_listener;
+		} else {
+			req->rsk_listener = NULL;
+		}
+		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+		sk_node_init(&req_to_sk(req)->sk_node);
+		sk_tx_queue_clear(req_to_sk(req));
+		req->saved_syn = NULL;
+		/* Following is temporary. It is coupled with debugging
+		 * helpers in reqsk_put() & reqsk_free()
+		 */
+		atomic_set(&req->rsk_refcnt, 0);
+	}
 	return req;
 }
 
-static inline void __reqsk_free(struct request_sock *req)
+static inline void reqsk_free(struct request_sock *req)
 {
+	/* temporary debugging */
+	WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
+
+	req->rsk_ops->destructor(req);
+	if (req->rsk_listener)
+		sock_put(req->rsk_listener);
+	kfree(req->saved_syn);
 	kmem_cache_free(req->rsk_ops->slab, req);
 }
 
-static inline void reqsk_free(struct request_sock *req)
+static inline void reqsk_put(struct request_sock *req)
 {
-	req->rsk_ops->destructor(req);
-	__reqsk_free(req);
+	if (atomic_dec_and_test(&req->rsk_refcnt))
+		reqsk_free(req);
 }
 
 extern int sysctl_max_syn_backlog;
 
-/** struct listen_sock - listen state
- *
- * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
- */
-struct listen_sock {
-	u8			max_qlen_log;
-	u8			synflood_warned;
-	/* 2 bytes hole, try to use */
-	int			qlen;
-	int			qlen_young;
-	int			clock_hand;
-	u32			hash_rnd;
-	u32			nr_table_entries;
-	struct request_sock	*syn_table[0];
-};
-
 /*
  * For a TCP Fast Open listener -
  *	lock - protects the access to all the reqsk, which is co-owned by
@@ -136,144 +159,72 @@
  * @rskq_accept_head - FIFO head of established children
  * @rskq_accept_tail - FIFO tail of established children
  * @rskq_defer_accept - User waits for some data after accept()
- * @syn_wait_lock - serializer
- *
- * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
- * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
  */
 struct request_sock_queue {
+	spinlock_t		rskq_lock;
+	u8			rskq_defer_accept;
+
+	u32			synflood_warned;
+	atomic_t		qlen;
+	atomic_t		young;
+
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
-	rwlock_t		syn_wait_lock;
-	u8			rskq_defer_accept;
-	/* 3 bytes hole, try to pack */
-	struct listen_sock	*listen_opt;
-	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
-					     * enabled on this listener. Check
-					     * max_qlen != 0 in fastopen_queue
-					     * to determine if TFO is enabled
-					     * right at this moment.
+	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
+					     * if TFO is enabled.
 					     */
 };
 
-extern int reqsk_queue_alloc(struct request_sock_queue *queue,
-			     unsigned int nr_table_entries);
+void reqsk_queue_alloc(struct request_sock_queue *queue);
 
-extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_fastopen_remove(struct sock *sk,
-				  struct request_sock *req, bool reset);
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+			   bool reset);
 
-static inline struct request_sock *
-	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	queue->rskq_accept_head = NULL;
-	return req;
-}
-
-static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
 	return queue->rskq_accept_head == NULL;
 }
 
-static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
-				      struct request_sock *req,
-				      struct request_sock **prev_req)
-{
-	write_lock(&queue->syn_wait_lock);
-	*prev_req = req->dl_next;
-	write_unlock(&queue->syn_wait_lock);
-}
-
-static inline void reqsk_queue_add(struct request_sock_queue *queue,
-				   struct request_sock *req,
-				   struct sock *parent,
-				   struct sock *child)
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+						      struct sock *parent)
 {
-	req->sk = child;
-	sk_acceptq_added(parent);
-
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_head = req;
-	else
-		queue->rskq_accept_tail->dl_next = req;
-
-	queue->rskq_accept_tail = req;
-	req->dl_next = NULL;
-}
-
-static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	WARN_ON(req == NULL);
-
-	queue->rskq_accept_head = req->dl_next;
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_tail = NULL;
+	struct request_sock *req;
 
+	spin_lock_bh(&queue->rskq_lock);
+	req = queue->rskq_accept_head;
+	if (req) {
+		sk_acceptq_removed(parent);
+		queue->rskq_accept_head = req->dl_next;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_tail = NULL;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
 	return req;
 }
 
-static inline int reqsk_queue_removed(struct request_sock_queue *queue,
-				      struct request_sock *req)
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+				       const struct request_sock *req)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
 	if (req->num_timeout == 0)
-		--lopt->qlen_young;
-
-	return --lopt->qlen;
+		atomic_dec(&queue->young);
+	atomic_dec(&queue->qlen);
 }
 
-static inline int reqsk_queue_added(struct request_sock_queue *queue)
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-	const int prev_qlen = lopt->qlen;
-
-	lopt->qlen_young++;
-	lopt->qlen++;
-	return prev_qlen;
+	atomic_inc(&queue->young);
+	atomic_inc(&queue->qlen);
}
 
 static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
-	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+	return atomic_read(&queue->qlen);
 }
 
 static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
-	return queue->listen_opt->qlen_young;
-}
-
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
-{
-	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
-}
-
-static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
-					u32 hash, struct request_sock *req,
-					unsigned long timeout)
-{
-	struct listen_sock *lopt = queue->listen_opt;
-
-	req->expires = jiffies + timeout;
-	req->num_retrans = 0;
-	req->num_timeout = 0;
-	req->sk = NULL;
-	req->dl_next = lopt->syn_table[hash];
-
-	write_lock(&queue->syn_wait_lock);
-	lopt->syn_table[hash] = req;
-	write_unlock(&queue->syn_wait_lock);
+	return atomic_read(&queue->young);
 }
 
 #endif /* _REQUEST_SOCK_H */
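
The heart of the patch above: each request_sock now pins its listener with a counted reference (sock_hold() in reqsk_alloc() when attach_listener is true) and is itself refcounted through rsk_refcnt, so reqsk_put() frees it on the final drop and reqsk_free() releases the listener reference. What follows is a minimal, illustrative userspace model of that lifecycle, not kernel code: C11 stdatomic stands in for the kernel's atomic_t, sock_hold() and sock_put(), all names are hypothetical stand-ins, and, unlike the patch (which deliberately starts rsk_refcnt at 0 for its temporary debug checks), the model takes its first reference at allocation so it runs standalone.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-ins: "listener" models the listening struct sock,
 * "request" models struct request_sock after this patch. */
struct listener {
	atomic_int refcnt;
};

struct request {
	atomic_int rsk_refcnt;
	struct listener *rsk_listener;
};

/* Mirrors reqsk_alloc(): optionally pin the listener, as sock_hold() does. */
static struct request *req_alloc(struct listener *l, bool attach_listener)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return NULL;
	if (attach_listener) {
		atomic_fetch_add(&l->refcnt, 1);	/* plays sock_hold() */
		req->rsk_listener = l;
	} else {
		req->rsk_listener = NULL;
	}
	atomic_init(&req->rsk_refcnt, 1);	/* model takes one ref up front */
	return req;
}

/* Mirrors reqsk_free(): drop the listener pin, then release the object. */
static void req_free(struct request *req)
{
	if (req->rsk_listener)
		atomic_fetch_sub(&req->rsk_listener->refcnt, 1); /* plays sock_put() */
	free(req);
}

/* Mirrors reqsk_put(): like atomic_dec_and_test(), the last ref frees it. */
static void req_put(struct request *req)
{
	if (atomic_fetch_sub(&req->rsk_refcnt, 1) == 1)
		req_free(req);
}

int main(void)
{
	struct listener l;
	struct request *req;

	atomic_init(&l.refcnt, 1);
	req = req_alloc(&l, true);
	printf("listener refcnt after alloc: %d\n", atomic_load(&l.refcnt)); /* 2 */
	req_put(req);	/* rsk_refcnt hits 0: req freed, listener unpinned */
	printf("listener refcnt after put:   %d\n", atomic_load(&l.refcnt)); /* 1 */
	return 0;
}

The listener pin is why the old per-listener SYN table (struct listen_sock) and its central timer could go away: a request armed with its own rsk_timer can safely outlive the accept path as long as it holds a reference that keeps the listener from being freed underneath it.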