--- zzzz-none-000/linux-3.10.107/net/sctp/associola.c 2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/net/sctp/associola.c 2021-02-04 17:41:59.000000000 +0000
@@ -22,16 +22,12 @@
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, write to
- * the Free Software Foundation, 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll
@@ -43,9 +39,6 @@
* Daisy Chang
* Ryan Layer
* Kevin Gao
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -62,6 +55,7 @@
#include
/* Forward declarations for internal functions. */
+static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
@@ -86,10 +80,9 @@
/* Discarding const is appropriate here. */
asoc->ep = (struct sctp_endpoint *)ep;
- sctp_endpoint_hold(asoc->ep);
-
- /* Hold the sock. */
asoc->base.sk = (struct sock *)sk;
+
+ sctp_endpoint_hold(asoc->ep);
sock_hold(asoc->base.sk);
/* Initialize the common base substructure. */
@@ -97,20 +90,12 @@
/* Initialize the object handling fields. */
atomic_set(&asoc->base.refcnt, 1);
- asoc->base.dead = false;
/* Initialize the bind addr area. */
sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
asoc->state = SCTP_STATE_CLOSED;
-
- /* Set these values from the socket values, a conversion between
- * millsecons to seconds/microseconds must also be done.
- */
- asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
- asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
- * 1000;
- asoc->frag_point = 0;
+ asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
asoc->user_frag = sp->user_frag;
/* Set the association max_retrans and RTO values from the
@@ -123,8 +108,6 @@
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
- asoc->overall_error_count = 0;
-
/* Initialize the association's heartbeat interval based on the
* sock configured value.
*/
@@ -145,18 +128,15 @@
*/
asoc->param_flags = sp->param_flags;
- /* Initialize the maximum mumber of new data packets that can be sent
+ /* Initialize the maximum number of new data packets that can be sent
* in a burst.
*/
asoc->max_burst = sp->max_burst;
/* initialize association timers */
- asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
/* sctpimpguide Section 2.12.2
* If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
@@ -165,10 +145,8 @@
asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
= 5 * asoc->rto_max;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
- min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -186,11 +164,6 @@
asoc->max_init_timeo =
msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
- /* Allocate storage for the ssnmap after the inbound and outbound
- * streams have been negotiated during Init.
- */
- asoc->ssnmap = NULL;
-
/* Set the local window size for receive.
* This is also the rcvbuf space per association.
* RFC 6 - A SCTP receiver MUST be able to receive a minimum of
@@ -203,25 +176,15 @@
asoc->a_rwnd = asoc->rwnd;
- asoc->rwnd_over = 0;
- asoc->rwnd_press = 0;
-
/* Use my own max window until I learn something better. */
asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
- /* Set the sndbuf size for transmit. */
- asoc->sndbuf_used = 0;
-
/* Initialize the receive memory counter */
atomic_set(&asoc->rmem_alloc, 0);
init_waitqueue_head(&asoc->wait);
asoc->c.my_vtag = sctp_generate_tag(ep);
- asoc->peer.i.init_tag = 0; /* INIT needs a vtag of 0. */
- asoc->c.peer_vtag = 0;
- asoc->c.my_ttag = 0;
- asoc->c.peer_ttag = 0;
asoc->c.my_port = ep->base.bind_addr.port;
asoc->c.initial_tsn = sctp_generate_tsn(ep);
@@ -232,7 +195,6 @@
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
asoc->highest_sacked = asoc->ctsn_ack_point;
asoc->last_cwr_tsn = asoc->ctsn_ack_point;
- asoc->unack_data = 0;
/* ADDIP Section 4.1 Asconf Chunk Procedures
*
@@ -251,7 +213,6 @@
/* Make an empty list of remote transport addresses. */
INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
- asoc->peer.transport_count = 0;
/* RFC 2960 5.1 Normal Establishment of an Association
*
@@ -265,20 +226,15 @@
* already received one packet.]
*/
asoc->peer.sack_needed = 1;
- asoc->peer.sack_cnt = 0;
asoc->peer.sack_generation = 1;
/* Assume that the peer will tell us if he recognizes ASCONF
* as part of INIT exchange.
- * The sctp_addip_noauth option is there for backward compatibilty
+ * The sctp_addip_noauth option is there for backward compatibility
* and will revert old behavior.
*/
- asoc->peer.asconf_capable = 0;
if (net->sctp.addip_noauth)
asoc->peer.asconf_capable = 1;
- asoc->asconf_addr_del_pending = NULL;
- asoc->src_out_of_asoc_ok = 0;
- asoc->new_transport = NULL;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
@@ -290,12 +246,6 @@
if (!sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
- memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));
-
- asoc->need_ecne = 0;
-
- asoc->assoc_id = 0;
-
/* Assume that peer would support both address types unless we are
* told otherwise.
*/
@@ -304,8 +254,6 @@
asoc->peer.ipv6_address = 1;
INIT_LIST_HEAD(&asoc->asocs);
- asoc->autoclose = sp->autoclose;
-
asoc->default_stream = sp->default_stream;
asoc->default_ppid = sp->default_ppid;
asoc->default_flags = sp->default_flags;
@@ -313,9 +261,6 @@
asoc->default_timetolive = sp->default_timetolive;
asoc->default_rcv_context = sp->default_rcv_context;
- /* SCTP_GET_ASSOC_STATS COUNTERS */
- memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));
-
/* AUTH related initializations */
INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
@@ -323,9 +268,7 @@
goto fail_init;
asoc->active_key_id = ep->active_key_id;
- asoc->asoc_shared_key = NULL;
- asoc->default_hmac_id = 0;
/* Save the hmacs and chunks list into this association */
if (ep->auth_hmacs_list)
memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
@@ -343,8 +286,8 @@
return asoc;
fail_init:
- sctp_endpoint_put(asoc->ep);
sock_put(asoc->base.sk);
+ sctp_endpoint_put(asoc->ep);
return NULL;
}
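
The hunk above, together with the constructor change earlier in this patch, makes the error path drop its references in the reverse order they were taken: the endpoint is held before the socket, so on failure the socket is put before the endpoint. Below is a minimal userspace sketch of that acquire-in-order / release-in-reverse goto-unwind idiom; every name in it is invented for the illustration and is not a kernel symbol.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: resource_a/resource_b stand in for the endpoint and
 * socket references held during init.
 */
static bool resource_a_get(void) { puts("get A"); return true; }
static void resource_a_put(void) { puts("put A"); }
static bool resource_b_get(void) { puts("get B"); return true; }
static void resource_b_put(void) { puts("put B"); }
static bool do_setup(void)       { return false; /* force the error path */ }

static int object_init(void)
{
        if (!resource_a_get())
                return -1;
        if (!resource_b_get())
                goto fail_b;
        if (!do_setup())
                goto fail_setup;
        return 0;

fail_setup:
        resource_b_put();       /* release in reverse order of acquisition */
fail_b:
        resource_a_put();
        return -1;
}

int main(void)
{
        return object_init() ? 1 : 0;
}
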
@@ -356,7 +299,7 @@
{
struct sctp_association *asoc;
- asoc = t_new(struct sctp_association, gfp);
+ asoc = kzalloc(sizeof(*asoc), gfp);
if (!asoc)
goto fail;
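
Switching the allocation to kzalloc() is what makes the many "foo = 0;" and "foo = NULL;" deletions elsewhere in this patch safe: the association now starts life as zeroed memory, so explicit zero/NULL initializers are redundant. A hedged userspace analogue using calloc(); the structure and its fields are made up for the example.

#include <assert.h>
#include <stdlib.h>

struct assoc_like {             /* illustrative stand-in, not the kernel struct */
        int overall_error_count;
        void *ssnmap;
        unsigned int rwnd_over;
};

int main(void)
{
        /* calloc() plays the role of kzalloc(..., gfp): a zeroed allocation
         * (all-bits-zero reads back as 0/NULL on the platforms of interest).
         */
        struct assoc_like *a = calloc(1, sizeof(*a));

        if (!a)
                return 1;

        /* No "a->overall_error_count = 0;" and friends needed any more. */
        assert(a->overall_error_count == 0);
        assert(a->ssnmap == NULL);
        assert(a->rwnd_over == 0);

        free(a);
        return 0;
}
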
@@ -364,7 +307,8 @@
goto fail_init;
SCTP_DBG_OBJCNT_INC(assoc);
- SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);
+
+ pr_debug("Created asoc %p\n", asoc);
return asoc;
@@ -447,8 +391,7 @@
sctp_asconf_queue_teardown(asoc);
/* Free pending address space being deleted */
- if (asoc->asconf_addr_del_pending != NULL)
- kfree(asoc->asconf_addr_del_pending);
+ kfree(asoc->asconf_addr_del_pending);
/* AUTH - Free the endpoint shared keys */
sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
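
Dropping the NULL check before kfree() above is safe because kfree(NULL) is defined to be a no-op, just as free(NULL) is in standard C; the guard only added noise. A trivial userspace equivalent:

#include <stdlib.h>

int main(void)
{
        char *pending = NULL;

        /* Before: if (pending != NULL) free(pending);
         * After:  unconditional call, still perfectly well-defined.
         */
        free(pending);
        return 0;
}
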
@@ -462,7 +405,10 @@
/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
- SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);
+ if (unlikely(!asoc->base.dead)) {
+ WARN(1, "Attempt to destroy undead association %p!\n", asoc);
+ return;
+ }
sctp_endpoint_put(asoc->ep);
sock_put(asoc->base.sk);
@@ -543,11 +489,8 @@
struct list_head *pos;
struct sctp_transport *transport;
- SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
- " port: %d\n",
- asoc,
- (&peer->ipaddr),
- ntohs(peer->ipaddr.v4.sin_port));
+ pr_debug("%s: association:%p addr:%pISpc\n",
+ __func__, asoc, &peer->ipaddr.sa);
/* If we are to remove the current retran_path, update it
* to the next peer before removing this peer from the list.
@@ -614,7 +557,7 @@
/* Start a T3 timer here in case it wasn't running so
* that these migrated packets have a chance to get
- * retrnasmitted.
+ * retransmitted.
*/
if (!timer_pending(&active->T3_rtx_timer))
if (!mod_timer(&active->T3_rtx_timer,
@@ -643,12 +586,8 @@
/* AF_INET and AF_INET6 share common port field. */
port = ntohs(addr->v4.sin_port);
- SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
- " port: %d state:%d\n",
- asoc,
- addr,
- port,
- peer_state);
+ pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
+ asoc, &addr->sa, peer_state);
/* Set the port if it has not been set yet. */
if (0 == asoc->peer.port)
@@ -681,7 +620,7 @@
/* Set the path max_retrans. */
peer->pathmaxrxt = asoc->pathmaxrxt;
- /* And the partial failure retrnas threshold */
+ /* And the partial failure retrans threshold */
peer->pf_retrans = asoc->pf_retrans;
/* Initialize the peer's SACK delay timeout based on the
@@ -715,8 +654,9 @@
else
asoc->pathmtu = peer->pathmtu;
- SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
- "%d\n", asoc, asoc->pathmtu);
+ pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
+ asoc->pathmtu);
+
peer->pmtu_pending = 0;
asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
@@ -834,9 +774,6 @@
sctp_transport_cmd_t command,
sctp_sn_error_t error)
{
- struct sctp_transport *t = NULL;
- struct sctp_transport *first;
- struct sctp_transport *second;
struct sctp_ulpevent *event;
struct sockaddr_storage addr;
int spc_state = 0;
@@ -855,12 +792,12 @@
else
spc_state = SCTP_ADDR_AVAILABLE;
/* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1, see SCTP
+ * active state and set cwnd to 1 MTU, see SCTP
* Quick failover draft section 5.1, point 5
*/
if (transport->state == SCTP_PF) {
ulp_notify = false;
- transport->cwnd = 1;
+ transport->cwnd = asoc->pathmtu;
}
transport->state = SCTP_ACTIVE;
break;
@@ -875,6 +812,7 @@
else {
dst_release(transport->dst);
transport->dst = NULL;
+ ulp_notify = false;
}
spc_state = SCTP_ADDR_UNREACHABLE;
@@ -889,13 +827,14 @@
return;
}
- /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
- * user.
+ /* Generate and send a SCTP_PEER_ADDR_CHANGE notification
+ * to the user.
*/
if (ulp_notify) {
memset(&addr, 0, sizeof(struct sockaddr_storage));
memcpy(&addr, &transport->ipaddr,
transport->af_specific->sockaddr_len);
+
event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
0, spc_state, error, GFP_ATOMIC);
if (event)
@@ -903,58 +842,7 @@
}
/* Select new active and retran paths. */
-
- /* Look for the two most recently used active transports.
- *
- * This code produces the wrong ordering whenever jiffies
- * rolls over, but we still get usable transports, so we don't
- * worry about it.
- */
- first = NULL; second = NULL;
-
- list_for_each_entry(t, &asoc->peer.transport_addr_list,
- transports) {
-
- if ((t->state == SCTP_INACTIVE) ||
- (t->state == SCTP_UNCONFIRMED) ||
- (t->state == SCTP_PF))
- continue;
- if (!first || t->last_time_heard > first->last_time_heard) {
- second = first;
- first = t;
- }
- if (!second || t->last_time_heard > second->last_time_heard)
- second = t;
- }
-
- /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
- *
- * By default, an endpoint should always transmit to the
- * primary path, unless the SCTP user explicitly specifies the
- * destination transport address (and possibly source
- * transport address) to use.
- *
- * [If the primary is active but not most recent, bump the most
- * recently used transport.]
- */
- if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
- (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
- first != asoc->peer.primary_path) {
- second = first;
- first = asoc->peer.primary_path;
- }
-
- /* If we failed to find a usable transport, just camp on the
- * primary, even if it is inactive.
- */
- if (!first) {
- first = asoc->peer.primary_path;
- second = asoc->peer.primary_path;
- }
-
- /* Set the active and retran transports. */
- asoc->peer.active_path = first;
- asoc->peer.retran_path = second;
+ sctp_select_active_and_retran_path(asoc);
}
/* Hold a reference to an association. */
@@ -1010,17 +898,13 @@
*/
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
- struct sctp_chunk *chunk;
+ if (!asoc->need_ecne)
+ return NULL;
/* Send ECNE if needed.
* Not being able to allocate a chunk here is not deadly.
*/
- if (asoc->need_ecne)
- chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
- else
- chunk = NULL;
-
- return chunk;
+ return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}
/*
@@ -1152,7 +1036,7 @@
}
if (chunk->transport)
- chunk->transport->last_time_heard = jiffies;
+ chunk->transport->last_time_heard = ktime_get();
/* Run through the state machine. */
error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
@@ -1282,7 +1166,7 @@
}
}
- /* SCTP-AUTH: Save the peer parameters from the new assocaitions
+ /* SCTP-AUTH: Save the peer parameters from the new associations
* and also move the association shared keys over
*/
kfree(asoc->peer.peer_random);
@@ -1324,29 +1208,59 @@
* within this document.
*
* Our basic strategy is to round-robin transports in priorities
- * according to sctp_state_prio_map[] e.g., if no such
+ * according to sctp_trans_score() e.g., if no such
* transport with state SCTP_ACTIVE exists, round-robin through
* SCTP_UNKNOWN, etc. You get the picture.
*/
-static const u8 sctp_trans_state_to_prio_map[] = {
- [SCTP_ACTIVE] = 3, /* best case */
- [SCTP_UNKNOWN] = 2,
- [SCTP_PF] = 1,
- [SCTP_INACTIVE] = 0, /* worst case */
-};
-
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
- return sctp_trans_state_to_prio_map[trans->state];
+ switch (trans->state) {
+ case SCTP_ACTIVE:
+ return 3; /* best case */
+ case SCTP_UNKNOWN:
+ return 2;
+ case SCTP_PF:
+ return 1;
+ default: /* case SCTP_INACTIVE */
+ return 0; /* worst case */
+ }
+}
+
+static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
+ struct sctp_transport *trans2)
+{
+ if (trans1->error_count > trans2->error_count) {
+ return trans2;
+ } else if (trans1->error_count == trans2->error_count &&
+ ktime_after(trans2->last_time_heard,
+ trans1->last_time_heard)) {
+ return trans2;
+ } else {
+ return trans1;
+ }
}
static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
- struct sctp_transport *best)
+ struct sctp_transport *best)
{
- if (best == NULL)
+ u8 score_curr, score_best;
+
+ if (best == NULL || curr == best)
return curr;
- return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best;
+ score_curr = sctp_trans_score(curr);
+ score_best = sctp_trans_score(best);
+
+ /* First, try a score-based selection if both transport states
+ * differ. If we're in a tie, let's try to make a more clever
+ * decision here based on error counts and last time heard.
+ */
+ if (score_curr > score_best)
+ return curr;
+ else if (score_curr == score_best)
+ return sctp_trans_elect_tie(curr, best);
+ else
+ return best;
}
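
When two transports score the same, sctp_trans_elect_tie() above prefers the one with fewer errors and, on a second tie, the one heard from most recently (ktime_after() on the last_time_heard timestamps, which this patch also moves from jiffies to ktime_get()). A standalone model of that comparison follows; the struct and the plain 64-bit nanosecond counter are assumptions of the sketch, not kernel definitions.

#include <stdint.h>
#include <stdio.h>

struct trans {                          /* illustrative stand-in only */
        const char *name;
        unsigned int error_count;
        uint64_t last_heard_ns;         /* models ktime_t last_time_heard */
};

static const struct trans *elect_tie(const struct trans *t1,
                                     const struct trans *t2)
{
        if (t1->error_count > t2->error_count)
                return t2;
        if (t1->error_count == t2->error_count &&
            t2->last_heard_ns > t1->last_heard_ns) /* heard more recently */
                return t2;
        return t1;
}

int main(void)
{
        struct trans a = { "a", 2, 1000 };
        struct trans b = { "b", 2, 5000 };

        printf("winner: %s\n", elect_tie(&a, &b)->name); /* prints "b" */
        return 0;
}
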
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
@@ -1381,15 +1295,77 @@
break;
}
- if (trans_next != NULL)
- asoc->peer.retran_path = trans_next;
+ asoc->peer.retran_path = trans_next;
+
+ pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
+ __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
+}
+
+static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
+{
+ struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
+ struct sctp_transport *trans_pf = NULL;
+
+ /* Look for the two most recently used active transports. */
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ /* Skip uninteresting transports. */
+ if (trans->state == SCTP_INACTIVE ||
+ trans->state == SCTP_UNCONFIRMED)
+ continue;
+ /* Keep track of the best PF transport from our
+ * list in case we don't find an active one.
+ */
+ if (trans->state == SCTP_PF) {
+ trans_pf = sctp_trans_elect_best(trans, trans_pf);
+ continue;
+ }
+ /* For active transports, pick the most recent ones. */
+ if (trans_pri == NULL ||
+ ktime_after(trans->last_time_heard,
+ trans_pri->last_time_heard)) {
+ trans_sec = trans_pri;
+ trans_pri = trans;
+ } else if (trans_sec == NULL ||
+ ktime_after(trans->last_time_heard,
+ trans_sec->last_time_heard)) {
+ trans_sec = trans;
+ }
+ }
+
+ /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
+ *
+ * By default, an endpoint should always transmit to the primary
+ * path, unless the SCTP user explicitly specifies the
+ * destination transport address (and possibly source transport
+ * address) to use. [If the primary is active but not most recent,
+ * bump the most recently used transport.]
+ */
+ if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
+ asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
+ asoc->peer.primary_path != trans_pri) {
+ trans_sec = trans_pri;
+ trans_pri = asoc->peer.primary_path;
+ }
+
+ /* We did not find anything useful for a possible retransmission
+ * path; either the primary path that we found is the same as
+ * the current one, or we didn't generally find an active one.
+ */
+ if (trans_sec == NULL)
+ trans_sec = trans_pri;
+
+ /* If we failed to find a usable transport, just camp on the
+ * active or pick a PF iff it's the better choice.
+ */
+ if (trans_pri == NULL) {
+ trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
+ trans_sec = trans_pri;
+ }
- SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
- " %p updated new path to addr: ",
- " port: %d\n",
- asoc,
- (&asoc->peer.retran_path->ipaddr),
- ntohs(asoc->peer.retran_path->ipaddr.v4.sin_port));
+ /* Set the active and retran transports. */
+ asoc->peer.active_path = trans_pri;
+ asoc->peer.retran_path = trans_sec;
}
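
The new helper walks the peer list once, remembering the two most recently heard-from ACTIVE transports as primary/retransmit candidates, keeping the best PF transport as a fallback, and then letting the configured primary path override the ordering per RFC 2960 6.4. Below is a compact userspace model of just the top-two scan; the states, fields, and timestamps are assumptions of the example rather than kernel data.

#include <stdint.h>
#include <stdio.h>

enum pstate { P_INACTIVE, P_UNCONFIRMED, P_PF, P_ACTIVE };

struct path {
        const char *name;
        enum pstate state;
        uint64_t last_heard_ns;
};

int main(void)
{
        struct path paths[] = {
                { "p0", P_ACTIVE,      100 },
                { "p1", P_PF,          900 },
                { "p2", P_ACTIVE,      700 },
                { "p3", P_UNCONFIRMED, 999 },
                { "p4", P_ACTIVE,      300 },
        };
        struct path *pri = NULL, *sec = NULL;
        unsigned int i;

        for (i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
                struct path *p = &paths[i];

                if (p->state != P_ACTIVE)  /* skip INACTIVE/UNCONFIRMED/PF */
                        continue;
                if (!pri || p->last_heard_ns > pri->last_heard_ns) {
                        sec = pri;         /* old primary becomes secondary */
                        pri = p;
                } else if (!sec || p->last_heard_ns > sec->last_heard_ns) {
                        sec = p;
                }
        }

        /* Expected: primary p2 (heard at 700), secondary p4 (heard at 300). */
        printf("primary=%s secondary=%s\n",
               pri ? pri->name : "none", sec ? sec->name : "none");
        return 0;
}
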
struct sctp_transport *
@@ -1437,12 +1413,12 @@
asoc->frag_point = sctp_frag_point(asoc, pmtu);
}
- SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
- __func__, asoc, asoc->pathmtu, asoc->frag_point);
+ pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
+ asoc->pathmtu, asoc->frag_point);
}
/* Should we send a SACK to update our peer? */
-static inline int sctp_peer_needs_update(struct sctp_association *asoc)
+static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
struct net *net = sock_net(asoc->base.sk);
switch (asoc->state) {
@@ -1454,12 +1430,12 @@
((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
(asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
asoc->pathmtu)))
- return 1;
+ return true;
break;
default:
break;
}
- return 0;
+ return false;
}
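
The bool-returning check above only asks for a window-update SACK once the real window has pulled ahead of the last advertised one by at least the larger of a slice of the socket receive buffer (rcvbuf right-shifted by rwnd_upd_shift) and one path MTU. A small sketch of that threshold test; the concrete numbers are picked for the example and are not stack defaults.

#include <stdbool.h>
#include <stdio.h>

static bool needs_window_update(unsigned int rwnd, unsigned int a_rwnd,
                                unsigned int rcvbuf, unsigned int upd_shift,
                                unsigned int pathmtu)
{
        unsigned int min_gain = rcvbuf >> upd_shift;

        if (min_gain < pathmtu)
                min_gain = pathmtu;     /* max(rcvbuf >> shift, pathmtu) */

        return rwnd > a_rwnd && (rwnd - a_rwnd) >= min_gain;
}

int main(void)
{
        /* rwnd grew from 4000 to 70000; 212992-byte rcvbuf, shift of 4
         * (=> 13312-byte threshold), 1500-byte path MTU.
         */
        printf("update? %d\n",
               needs_window_update(70000, 4000, 212992, 4, 1500)); /* 1 */
        return 0;
}
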
/* Increase asoc's rwnd by len and send any window update SACK if needed. */
@@ -1490,9 +1466,9 @@
asoc->rwnd_press -= change;
}
- SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
- "- %u\n", __func__, asoc, len, asoc->rwnd,
- asoc->rwnd_over, asoc->a_rwnd);
+ pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
+ __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
+ asoc->a_rwnd);
/* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer.
@@ -1501,9 +1477,11 @@
*/
if (sctp_peer_needs_update(asoc)) {
asoc->a_rwnd = asoc->rwnd;
- SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
- "rwnd: %u a_rwnd: %u\n", __func__,
- asoc, asoc->rwnd, asoc->a_rwnd);
+
+ pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
+ "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
+ asoc->a_rwnd);
+
sack = sctp_make_sack(asoc);
if (!sack)
return;
@@ -1525,8 +1503,10 @@
int rx_count;
int over = 0;
- SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
- SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
+ if (unlikely(!asoc->rwnd || asoc->rwnd_over))
+ pr_debug("%s: association:%p has asoc->rwnd:%u, "
+ "asoc->rwnd_over:%u!\n", __func__, asoc,
+ asoc->rwnd, asoc->rwnd_over);
if (asoc->ep->rcvbuf_policy)
rx_count = atomic_read(&asoc->rmem_alloc);
@@ -1535,7 +1515,7 @@
/* If we've reached or overflowed our receive buffer, announce
* a 0 rwnd if rwnd would still be positive. Store the
- * the pottential pressure overflow so that the window can be restored
+ * potential pressure overflow so that the window can be restored
* back to original value.
*/
if (rx_count >= asoc->base.sk->sk_rcvbuf)
@@ -1551,9 +1531,10 @@
asoc->rwnd_over = len - asoc->rwnd;
asoc->rwnd = 0;
}
- SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
- __func__, asoc, len, asoc->rwnd,
- asoc->rwnd_over, asoc->rwnd_press);
+
+ pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
+ __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
+ asoc->rwnd_press);
}
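
When a decrease overshoots the current window, the code parks the shortfall in rwnd_over (and memory-pressure clamping separately in rwnd_press) so that later increases can pay it back before the advertised window grows again. A simplified userspace model of that over/under bookkeeping follows; it deliberately ignores rwnd_press and SACK generation.

#include <stdio.h>

struct win {
        unsigned int rwnd;
        unsigned int rwnd_over;
};

static void rwnd_decrease(struct win *w, unsigned int len)
{
        if (len <= w->rwnd) {
                w->rwnd -= len;
        } else {
                w->rwnd_over += len - w->rwnd;  /* remember the overshoot */
                w->rwnd = 0;
        }
}

static void rwnd_increase(struct win *w, unsigned int len)
{
        if (w->rwnd_over) {
                unsigned int back = len < w->rwnd_over ? len : w->rwnd_over;

                w->rwnd_over -= back;           /* pay the overshoot back first */
                len -= back;
        }
        w->rwnd += len;
}

int main(void)
{
        struct win w = { 1000, 0 };

        rwnd_decrease(&w, 1500);        /* rwnd 0, rwnd_over 500 */
        rwnd_increase(&w, 800);         /* rwnd_over 0, rwnd 300 */
        printf("rwnd=%u over=%u\n", w.rwnd, w.rwnd_over);
        return 0;
}
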
/* Build the bind address list for the association based on info from the
@@ -1609,7 +1590,7 @@
/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
- bool preload = gfp & __GFP_WAIT;
+ bool preload = gfpflags_allow_blocking(gfp);
int ret;
/* If the id is already assigned, keep it. */