--- zzzz-none-000/linux-4.19.183/net/ipv4/tcp.c	2021-03-24 10:07:39.000000000 +0000
+++ bcm63-7530ax-756/linux-4.19.183/net/ipv4/tcp.c	2023-06-28 08:54:21.000000000 +0000
@@ -274,6 +274,9 @@
 
 #include <net/icmp.h>
 #include <net/inet_common.h>
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#include <net/mptcp.h>
+#endif
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
@@ -399,6 +402,28 @@
 	return rate64;
 }
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+static int select_size(const struct sock *sk, bool first_skb, bool zc);
+
+const struct tcp_sock_ops tcp_specific = {
+	.__select_window = __tcp_select_window,
+	.select_window = tcp_select_window,
+	.select_initial_window = tcp_select_initial_window,
+	.select_size = select_size,
+	.init_buffer_space = tcp_init_buffer_space,
+	.set_rto = tcp_set_rto,
+	.should_expand_sndbuf = tcp_should_expand_sndbuf,
+	.send_fin = tcp_send_fin,
+	.write_xmit = tcp_write_xmit,
+	.send_active_reset = tcp_send_active_reset,
+	.write_wakeup = tcp_write_wakeup,
+	.retransmit_timer = tcp_retransmit_timer,
+	.time_wait = tcp_time_wait,
+	.cleanup_rbuf = tcp_cleanup_rbuf,
+	.cwnd_validate = tcp_cwnd_validate,
+};
+
+#endif
 /* Address-family independent initialization for a tcp_sock.
  *
  * NOTE: A lot of things set to zero explicitly by call to
@@ -452,6 +477,13 @@
 	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	tp->ops = &tcp_specific;
+
+	/* Initialize MPTCP-specific stuff and function-pointers */
+	mptcp_init_tcp_sock(sk);
+
+#endif
 	sk_sockets_allocated_inc(sk);
 	sk->sk_route_forced_caps = NETIF_F_GSO;
 }
@@ -466,7 +498,11 @@
 	tcp_init_metrics(sk);
 	tcp_call_bpf(sk, bpf_op, 0, NULL);
 	tcp_init_congestion_control(sk);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	tcp_init_buffer_space(sk);
+#else
+	tcp_sk(sk)->ops->init_buffer_space(sk);
+#endif
 }
 
 static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
@@ -796,6 +832,9 @@
 	int ret;
 
 	sock_rps_record_flow(sk);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+#endif
 	/*
 	 * We can't seek on a socket input
 	 */
@@ -806,6 +845,18 @@
 
 	lock_sock(sk);
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	if (mptcp(tcp_sk(sk))) {
+		struct mptcp_tcp_sock *mptcp;
+
+		mptcp_for_each_sub(tcp_sk(sk)->mpcb, mptcp) {
+			sock_rps_record_flow(mptcp_to_sock(mptcp));
+		}
+	}
+#endif
+
+#endif
 	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
 	while (tss.len) {
 		ret = __tcp_splice_read(sk, &tss);
@@ -909,8 +960,12 @@
 	return NULL;
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				       int large_allowed)
+#else
+unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, int large_allowed)
+#endif
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 new_size_goal, size_goal;
@@ -938,8 +993,18 @@
 {
 	int mss_now;
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	mss_now = tcp_current_mss(sk);
 	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
+#else
+	if (mptcp(tcp_sk(sk))) {
+		mss_now = mptcp_current_mss(sk);
+		*size_goal = mptcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
+	} else {
+		mss_now = tcp_current_mss(sk);
+		*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
+	}
+#endif
 
 	return mss_now;
 }
@@ -974,12 +1039,40 @@
 	 * is fully established.
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	    !tcp_passive_fastopen(sk)) {
+#else
+	    !tcp_passive_fastopen(mptcp(tp) && tp->mpcb->master_sk ?
+				  tp->mpcb->master_sk : sk)) {
+#endif
 		err = sk_stream_wait_connect(sk, &timeo);
 		if (err != 0)
 			goto out_err;
 	}
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (mptcp(tp)) {
+		struct mptcp_tcp_sock *mptcp;
+
+		/* We must check this with socket-lock hold because we iterate
+		 * over the subflows.
+		 */
+		if (!mptcp_can_sendpage(sk)) {
+			ssize_t ret;
+
+			release_sock(sk);
+			ret = sock_no_sendpage(sk->sk_socket, page, offset,
+					       size, flags);
+			lock_sock(sk);
+			return ret;
+		}
+
+		mptcp_for_each_sub(tp->mpcb, mptcp) {
+			sock_rps_record_flow(mptcp_to_sock(mptcp));
+		}
+	}
+
+#endif
 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 	mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1098,7 +1191,12 @@
 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
 			size_t size, int flags)
 {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	if (!(sk->sk_route_caps & NETIF_F_SG))
+#else
+	/* If MPTCP is enabled, we check it later after establishment */
+	if (!mptcp(tcp_sk(sk)) && !(sk->sk_route_caps & NETIF_F_SG))
+#endif
 		return sock_no_sendpage_locked(sk, page, offset, size, flags);
 
 	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
@@ -1130,14 +1228,22 @@
  * This also speeds up tso_fragment(), since it wont fallback
  * to tcp_fragment().
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int linear_payload_sz(bool first_skb)
+#else
+int linear_payload_sz(bool first_skb)
+#endif
 {
 	if (first_skb)
 		return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
 	return 0;
 }
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int select_size(bool first_skb, bool zc)
+#else
+static int select_size(const struct sock *sk, bool first_skb, bool zc)
+#endif
 {
 	if (zc)
 		return 0;
@@ -1247,12 +1353,27 @@
 	 * is fully established.
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	    !tcp_passive_fastopen(sk)) {
+#else
+	    !tcp_passive_fastopen(mptcp(tp) && tp->mpcb->master_sk ?
+				  tp->mpcb->master_sk : sk)) {
+#endif
 		err = sk_stream_wait_connect(sk, &timeo);
 		if (err != 0)
 			goto do_error;
 	}
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (mptcp(tp)) {
+		struct mptcp_tcp_sock *mptcp;
+
+		mptcp_for_each_sub(tp->mpcb, mptcp) {
+			sock_rps_record_flow(mptcp_to_sock(mptcp));
+		}
+	}
+
+#endif
 	if (unlikely(tp->repair)) {
 		if (tp->repair_queue == TCP_RECV_QUEUE) {
 			copied = tcp_send_rcvq(sk, msg, size);
@@ -1308,7 +1429,11 @@
 					goto restart;
 			}
 			first_skb = tcp_rtx_and_write_queues_empty(sk);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			linear = select_size(first_skb, zc);
+#else
+			linear = tp->ops->select_size(sk, first_skb, zc);
+#endif
 			skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
 						  first_skb);
 			if (!skb)
@@ -1546,7 +1671,11 @@
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static void tcp_cleanup_rbuf(struct sock *sk, int copied)
+#else
+void tcp_cleanup_rbuf(struct sock *sk, int copied)
+#endif
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	bool time_to_ack = false;
@@ -1589,7 +1718,11 @@
 
 		/* Optimize, __tcp_select_window() is not cheap. */
 		if (2*rcv_window_now <= tp->window_clamp) {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			__u32 new_window = __tcp_select_window(sk);
+#else
+			__u32 new_window = tp->ops->__select_window(sk);
+#endif
 
 			/* Send ACK now, if this read freed lots of space
 			 * in our buffer. Certainly, new_window is new window.
@@ -1705,7 +1838,11 @@
 	/* Clean up data we have read: This will do ACK frames. */
 	if (copied > 0) {
 		tcp_recv_skb(sk, seq, &offset);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		tcp_cleanup_rbuf(sk, copied);
+#else
+		tp->ops->cleanup_rbuf(sk, copied);
+#endif
 	}
 	return copied;
 }
@@ -1963,6 +2100,18 @@
 
 	lock_sock(sk);
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	if (mptcp(tp)) {
+		struct mptcp_tcp_sock *mptcp;
+
+		mptcp_for_each_sub(tp->mpcb, mptcp) {
+			sock_rps_record_flow(mptcp_to_sock(mptcp));
+		}
+	}
+#endif
+
+#endif
 	err = -ENOTCONN;
 	if (sk->sk_state == TCP_LISTEN)
 		goto out;
@@ -2081,7 +2230,11 @@
 			}
 		}
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		tcp_cleanup_rbuf(sk, copied);
+#else
+		tp->ops->cleanup_rbuf(sk, copied);
+#endif
 
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
@@ -2174,7 +2327,11 @@
 	 */
 
 	/* Clean up data we have read: This will do ACK frames. */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	tcp_cleanup_rbuf(sk, copied);
+#else
+	tp->ops->cleanup_rbuf(sk, copied);
+#endif
 
 	release_sock(sk);
 
@@ -2286,7 +2443,11 @@
   [TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
 };
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 static int tcp_close_state(struct sock *sk)
+#else
+int tcp_close_state(struct sock *sk)
+#endif
 {
 	int next = (int)new_state[sk->sk_state];
 	int ns = next & TCP_STATE_MASK;
@@ -2316,7 +2477,11 @@
 	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
 		/* Clear out any half completed packets.  FIN if needed. */
 		if (tcp_close_state(sk))
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_send_fin(sk);
+#else
+			tcp_sk(sk)->ops->send_fin(sk);
+#endif
 	}
 }
 EXPORT_SYMBOL(tcp_shutdown);
@@ -2341,6 +2506,19 @@
 	int data_was_unread = 0;
 	int state;
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (is_meta_sk(sk)) {
+		/* TODO: Currently forcing timeout to 0 because
+		 * sk_stream_wait_close will complain during lockdep because
+		 * of the mpcb_mutex (circular lock dependency through
+		 * inet_csk_listen_stop()).
+		 * We should find a way to get rid of the mpcb_mutex.
+		 */
+		mptcp_close(sk, 0);
+		return;
+	}
+
+#endif
 	lock_sock(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -2385,7 +2563,11 @@
 			/* Unread data was tossed, zap the connection. */
 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 			tcp_set_state(sk, TCP_CLOSE);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_send_active_reset(sk, sk->sk_allocation);
+#else
+			tcp_sk(sk)->ops->send_active_reset(sk, sk->sk_allocation);
+#endif
 		} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 			/* Check zero linger _after_ checking for unread data. */
 			sk->sk_prot->disconnect(sk, 0);
@@ -2459,7 +2641,11 @@
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (tp->linger2 < 0) {
 			tcp_set_state(sk, TCP_CLOSE);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
+#else
+			tp->ops->send_active_reset(sk, GFP_ATOMIC);
+#endif
 			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPABORTONLINGER);
 		} else {
@@ -2469,7 +2655,12 @@
 				inet_csk_reset_keepalive_timer(sk,
 						tmo - TCP_TIMEWAIT_LEN);
 			} else {
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+#else
+				tcp_sk(sk)->ops->time_wait(sk, TCP_FIN_WAIT2,
+							   tmo);
+#endif
 				goto out;
 			}
 		}
@@ -2478,7 +2669,11 @@
 		sk_mem_reclaim(sk);
 		if (tcp_check_oom(sk, 0)) {
 			tcp_set_state(sk, TCP_CLOSE);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
+#else
+			tcp_sk(sk)->ops->send_active_reset(sk, GFP_ATOMIC);
+#endif
 			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPABORTONMEMORY);
 		} else if (!check_net(sock_net(sk))) {
@@ -2507,6 +2702,7 @@
 }
 EXPORT_SYMBOL(tcp_close);
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 /* These states need RST on ABORT according to RFC793 */
 
 static inline bool tcp_need_reset(int state)
@@ -2516,6 +2712,7 @@
 		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
 }
 
+#endif
 static void tcp_rtx_queue_purge(struct sock *sk)
 {
 	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
@@ -2537,6 +2734,12 @@
 {
 	struct sk_buff *skb;
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (mptcp(tcp_sk(sk)) && !is_meta_sk(sk) &&
+	    !tcp_rtx_and_write_queues_empty(sk))
+		mptcp_reinject_data(sk, 0);
+
+#endif
 	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
 		tcp_skb_tsorted_anchor_cleanup(skb);
@@ -2572,7 +2775,11 @@
 		/* The last check adjusts for discrepancy of Linux wrt. RFC
 		 * states
 		 */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		tcp_send_active_reset(sk, gfp_any());
+#else
+		tp->ops->send_active_reset(sk, gfp_any());
+#endif
 		sk->sk_err = ECONNRESET;
 	} else if (old_state == TCP_SYN_SENT)
 		sk->sk_err = ECONNRESET;
@@ -2590,6 +2797,15 @@
 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
 		inet_reset_saddr(sk);
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	if (is_meta_sk(sk)) {
+		mptcp_disconnect(sk);
+	} else {
+		if (tp->inside_tk_table)
+			mptcp_hash_remove_bh(tp);
+	}
+
+#endif
 	sk->sk_shutdown = 0;
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->srtt_us = 0;
@@ -2656,7 +2872,11 @@
 static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		(sk->sk_state != TCP_LISTEN);
+#else
+		(sk->sk_state != TCP_LISTEN) && !sock_flag(sk, SOCK_MPTCP);
+#endif
 }
 
 static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
@@ -2802,6 +3022,63 @@
 		return tcp_fastopen_reset_cipher(net, sk, key, sizeof(key));
 	}
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	case MPTCP_SCHEDULER: {
+		char name[MPTCP_SCHED_NAME_MAX];
+
+		if (optlen < 1)
+			return -EINVAL;
+
+		/* Cannot be used if MPTCP is not used or we already have
+		 * established an MPTCP-connection.
+		 */
+		if (mptcp_init_failed || !sysctl_mptcp_enabled ||
+		    sk->sk_state != TCP_CLOSE)
+			return -EPERM;
+
+		val = strncpy_from_user(name, optval,
+					min_t(long, MPTCP_SCHED_NAME_MAX - 1,
+					      optlen));
+
+		if (val < 0)
+			return -EFAULT;
+		name[val] = 0;
+
+		lock_sock(sk);
+		err = mptcp_set_scheduler(sk, name);
+		release_sock(sk);
+		return err;
+	}
+
+	case MPTCP_PATH_MANAGER: {
+		char name[MPTCP_PM_NAME_MAX];
+
+		if (optlen < 1)
+			return -EINVAL;
+
+		/* Cannot be used if MPTCP is not used or we already have
+		 * established an MPTCP-connection.
+		 */
+		if (mptcp_init_failed || !sysctl_mptcp_enabled ||
+		    sk->sk_state != TCP_CLOSE)
+			return -EPERM;
+
+		val = strncpy_from_user(name, optval,
+					min_t(long, MPTCP_PM_NAME_MAX - 1,
+					      optlen));
+
+		if (val < 0)
+			return -EFAULT;
+		name[val] = 0;
+
+		lock_sock(sk);
+		err = mptcp_set_path_manager(sk, name);
+		release_sock(sk);
+		return err;
+	}
+#endif
+#endif
 	default:
 		/* fallthru */
 		break;
 	}
@@ -2991,6 +3268,14 @@
 		break;
 
 	case TCP_DEFER_ACCEPT:
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+		/* An established MPTCP-connection (mptcp(tp) only returns true
+		 * if the socket is established) should not use DEFER on new
+		 * subflows.
+		 */
+		if (mptcp(tp))
+			break;
+#endif
 		/* Translate value in seconds to number of retransmits */
 		icsk->icsk_accept_queue.rskq_defer_accept =
 			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
@@ -3018,7 +3303,11 @@
 			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
 			    inet_csk_ack_scheduled(sk)) {
 				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 				tcp_cleanup_rbuf(sk, 1);
+#else
+				tp->ops->cleanup_rbuf(sk, 1);
+#endif
 				if (!(val & 1))
 					icsk->icsk_ack.pingpong = 1;
 			}
@@ -3028,6 +3317,10 @@
 #ifdef CONFIG_TCP_MD5SIG
 	case TCP_MD5SIG:
 	case TCP_MD5SIG_EXT:
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
+#else
+		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN) && !sock_flag(sk, SOCK_MPTCP))
+#endif
 		err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
 		break;
 #endif
@@ -3084,6 +3377,34 @@
 		tp->notsent_lowat = val;
 		sk->sk_write_space(sk);
 		break;
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	case MPTCP_ENABLED:
+		if (mptcp_init_failed || !sysctl_mptcp_enabled ||
+		    sk->sk_state != TCP_CLOSE
+#ifdef CONFIG_TCP_MD5SIG
+		    || tp->md5sig_info
+#endif
+		    ) {
+			err = -EPERM;
+			break;
+		}
+
+		if (val)
+			mptcp_enable_sock(sk);
+		else
+			mptcp_disable_sock(sk);
+		break;
+	case MPTCP_INFO:
+		if (mptcp_init_failed || !sysctl_mptcp_enabled) {
+			err = -EPERM;
+			break;
+		}
+
+		tp->record_master_info = !!(val & MPTCP_INFO_FLAG_SAVE_MASTER);
+		break;
+#endif
+#endif
 	case TCP_INQ:
 		if (val > 1 || val < 0)
 			err = -EINVAL;
@@ -3143,7 +3464,11 @@
 }
 
 /* Return information about state of tcp endpoint in API format. */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 void tcp_get_info(struct sock *sk, struct tcp_info *info)
+#else
+void tcp_get_info(struct sock *sk, struct tcp_info *info, bool no_lock)
+#endif
 {
 	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -3180,7 +3505,12 @@
 		return;
 	}
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	slow = lock_sock_fast(sk);
+#else
+	if (!no_lock)
+		slow = lock_sock_fast(sk);
+#endif
 
 	info->tcpi_ca_state = icsk->icsk_ca_state;
 	info->tcpi_retransmits = icsk->icsk_retransmits;
@@ -3254,7 +3584,13 @@
 	info->tcpi_bytes_retrans = tp->bytes_retrans;
 	info->tcpi_dsack_dups = tp->dsack_dups;
 	info->tcpi_reord_seen = tp->reord_seen;
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 
 	unlock_sock_fast(sk, slow);
+#else
+
+	if (!no_lock)
+		unlock_sock_fast(sk, slow);
+#endif
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
@@ -3399,7 +3735,11 @@
 		if (get_user(len, optlen))
 			return -EFAULT;
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 		tcp_get_info(sk, &info);
+#else
+		tcp_get_info(sk, &info, false);
+#endif
 
 		len = min_t(unsigned int, len, sizeof(info));
 		if (put_user(len, optlen))
@@ -3590,6 +3930,89 @@
 		}
 		return 0;
 	}
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+#ifdef CONFIG_MPTCP
+	case MPTCP_SCHEDULER:
+		if (get_user(len, optlen))
+			return -EFAULT;
+		len = min_t(unsigned int, len, MPTCP_SCHED_NAME_MAX);
+		if (put_user(len, optlen))
+			return -EFAULT;
+
+		lock_sock(sk);
+		if (mptcp(tcp_sk(sk))) {
+			struct mptcp_cb *mpcb = tcp_sk(mptcp_meta_sk(sk))->mpcb;
+
+			if (copy_to_user(optval, mpcb->sched_ops->name, len)) {
+				release_sock(sk);
+				return -EFAULT;
+			}
+		} else {
+			if (copy_to_user(optval, tcp_sk(sk)->mptcp_sched_name,
+					 len)) {
+				release_sock(sk);
+				return -EFAULT;
+			}
+		}
+		release_sock(sk);
+		return 0;
+
+	case MPTCP_PATH_MANAGER:
+		if (get_user(len, optlen))
+			return -EFAULT;
+		len = min_t(unsigned int, len, MPTCP_PM_NAME_MAX);
+		if (put_user(len, optlen))
+			return -EFAULT;
+
+		lock_sock(sk);
+		if (mptcp(tcp_sk(sk))) {
+			struct mptcp_cb *mpcb = tcp_sk(mptcp_meta_sk(sk))->mpcb;
+
+			if (copy_to_user(optval, mpcb->pm_ops->name, len)) {
+				release_sock(sk);
+				return -EFAULT;
+			}
+		} else {
+			if (copy_to_user(optval, tcp_sk(sk)->mptcp_pm_name,
+					 len)) {
+				release_sock(sk);
+				return -EFAULT;
+			}
+		}
+		release_sock(sk);
+		return 0;
+
+	case MPTCP_ENABLED:
+		if (sk->sk_state != TCP_SYN_SENT)
+			val = mptcp(tp) ? 1 : 0;
+		else
+			val = sock_flag(sk, SOCK_MPTCP) ? 1 : 0;
+		break;
+	case MPTCP_INFO:
+	{
+		int ret;
+
+		if (!mptcp(tp))
+			return -EINVAL;
+
+		if (get_user(len, optlen))
+			return -EFAULT;
+
+		len = min_t(unsigned int, len, sizeof(struct mptcp_info));
+
+		lock_sock(sk);
+		ret = mptcp_get_info(sk, optval, len);
+		release_sock(sk);
+
+		if (ret)
+			return ret;
+
+		if (put_user(len, optlen))
+			return -EFAULT;
+		return 0;
+	}
+#endif
+#endif
 #ifdef CONFIG_MMU
 	case TCP_ZEROCOPY_RECEIVE: {
 		struct tcp_zerocopy_receive zc;
@@ -3786,7 +4209,13 @@
 	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
 		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	WARN_ON(sk->sk_state == TCP_CLOSE);
+#endif
 	tcp_set_state(sk, TCP_CLOSE);
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+
+#endif
 	tcp_clear_xmit_timers(sk);
 	if (req)
 		reqsk_fastopen_remove(sk, req, false);
@@ -3802,6 +4231,10 @@
 
 int tcp_abort(struct sock *sk, int err)
 {
+#if defined(CONFIG_BCM_KF_MPTCP) && defined(CONFIG_BCM_MPTCP)
+	struct sock *meta_sk = mptcp(tcp_sk(sk)) ? mptcp_meta_sk(sk) : sk;
+
+#endif
 	if (!sk_fullsock(sk)) {
 		if (sk->sk_state == TCP_NEW_SYN_RECV) {
 			struct request_sock *req = inet_reqsk(sk);
@@ -3815,7 +4248,11 @@
 	}
 
 	/* Don't race with userspace socket closes such as tcp_close. */
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	lock_sock(sk);
+#else
+	lock_sock(meta_sk);
+#endif
 
 	if (sk->sk_state == TCP_LISTEN) {
 		tcp_set_state(sk, TCP_CLOSE);
@@ -3824,7 +4261,11 @@
 
 	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
 	local_bh_disable();
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_lock_sock(sk);
+#else
+	bh_lock_sock(meta_sk);
+#endif
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		sk->sk_err = err;
@@ -3832,14 +4273,26 @@
 		smp_wmb();
 		sk->sk_error_report(sk);
 		if (tcp_need_reset(sk->sk_state))
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
+#else
+			tcp_sk(sk)->ops->send_active_reset(sk, GFP_ATOMIC);
+#endif
 		tcp_done(sk);
 	}
 
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	bh_unlock_sock(sk);
+#else
+	bh_unlock_sock(meta_sk);
+#endif
 	local_bh_enable();
 	tcp_write_queue_purge(sk);
+#if !defined(CONFIG_BCM_KF_MPTCP) || !defined(CONFIG_BCM_MPTCP)
 	release_sock(sk);
+#else
+	release_sock(meta_sk);
+#endif
 	return 0;
 }
 EXPORT_SYMBOL_GPL(tcp_abort);
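
Editor's note (illustration, not part of the patch): the do_tcp_setsockopt()/do_tcp_getsockopt() hunks above add MPTCP_ENABLED, MPTCP_SCHEDULER, MPTCP_PATH_MANAGER and MPTCP_INFO handling. The sketch below shows how an application might drive two of these options from user space. It is a minimal example under stated assumptions: the fallback option numbers and the "default" scheduler name are taken from the out-of-tree MPTCP fork and are assumptions here; the authoritative values come from the patched include/uapi/linux/tcp.h.

/* Illustrative sketch only; build against the patched UAPI headers where
 * available, the fallback defines below are assumptions. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef MPTCP_ENABLED
#define MPTCP_ENABLED	42	/* assumed value, see patched include/uapi/linux/tcp.h */
#endif
#ifndef MPTCP_SCHEDULER
#define MPTCP_SCHEDULER	43	/* assumed value */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	int one = 1;
	char sched[] = "default";	/* assumed scheduler name */

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Both options are only accepted while the socket is still in
	 * TCP_CLOSE, mirroring the sk->sk_state != TCP_CLOSE checks that
	 * the patch adds in do_tcp_setsockopt(). */
	if (setsockopt(fd, IPPROTO_TCP, MPTCP_ENABLED, &one, sizeof(one)) < 0)
		perror("setsockopt(MPTCP_ENABLED)");
	if (setsockopt(fd, IPPROTO_TCP, MPTCP_SCHEDULER, sched, strlen(sched)) < 0)
		perror("setsockopt(MPTCP_SCHEDULER)");

	/* connect()/send() as usual; the kernel falls back to plain TCP if
	 * the peer does not negotiate MPTCP. */
	close(fd);
	return 0;
}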