Commit f7aec129 authored by David Howells, committed by David S. Miller

rxrpc: Cache the congestion window setting

Cache the congestion window setting that was determined during a call's
transmission phase when it finishes so that it can be used by the next call
to the same peer, thereby shortcutting the slow-start algorithm.

The value is stored in the rxrpc_peer struct and is accessed without
locking.  Each call takes the value that happens to be there when it starts
and just overwrites the value when it finishes.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0430a260
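
The caching pattern is small enough to show in miniature. Below is an illustrative, self-contained sketch, not the kernel code: the structs are simplified stand-ins for rxrpc_peer and rxrpc_call, showing how each call copies the peer's cached window when it begins and overwrites the cache when it finishes. A stale read is harmless because the value is a single byte that only seeds the next call's starting window.

/* Illustrative sketch of the lockless cwnd cache (simplified stand-ins,
 * not the kernel structures). A racing reader may observe a slightly
 * stale value; that is tolerable since it is only a starting hint.
 */
#include <stdio.h>

struct peer { unsigned char cong_cwnd; };	/* cached per-peer window hint */
struct call { struct peer *peer; unsigned char cong_cwnd; };

static void call_begin(struct call *call, struct peer *peer)
{
	call->peer = peer;
	call->cong_cwnd = peer->cong_cwnd;	/* take the value that happens to be there */
}

static void call_end(struct call *call)
{
	call->peer->cong_cwnd = call->cong_cwnd;	/* overwrite on finish */
}

int main(void)
{
	struct peer p = { .cong_cwnd = 2 };	/* conservative initial window */
	struct call c;

	call_begin(&c, &p);
	c.cong_cwnd = 10;	/* window grown during the transmission phase */
	call_end(&c);

	/* The next call to this peer starts here instead of at slow start. */
	printf("cached cwnd = %u\n", (unsigned)p.cong_cwnd);
	return 0;
}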
@@ -300,6 +300,8 @@ struct rxrpc_peer {
 	u64		rtt_cache[RXRPC_RTT_CACHE_SIZE];	/* Determined RTT cache */
 	u8		rtt_cursor;	/* next entry at which to insert */
 	u8		rtt_usage;	/* amount of cache actually used */
+
+	u8		cong_cwnd;	/* Congestion window size */
 };
 
 /*
@@ -310,6 +310,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 	rxrpc_see_call(call);
 	call->conn = conn;
 	call->peer = rxrpc_get_peer(conn->params.peer);
+	call->cong_cwnd = call->peer->cong_cwnd;
 	return call;
 }
@@ -136,12 +136,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 	call->tx_winsize = 16;
 	call->rx_expect_next = 1;
 
-	if (RXRPC_TX_SMSS > 2190)
-		call->cong_cwnd = 2;
-	else if (RXRPC_TX_SMSS > 1095)
-		call->cong_cwnd = 3;
-	else
-		call->cong_cwnd = 4;
+	call->cong_cwnd = 2;
 	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
 	return call;
@@ -292,6 +292,12 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
 	if (!cp->peer)
 		goto error;
 
+	call->cong_cwnd = cp->peer->cong_cwnd;
+	if (call->cong_cwnd >= call->cong_ssthresh)
+		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+	else
+		call->cong_mode = RXRPC_CALL_SLOW_START;
+
 	/* If the connection is not meant to be exclusive, search the available
 	 * connections to see if the connection we want to use already exists.
 	 */
@@ -193,6 +193,8 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 {
 	struct rxrpc_connection *conn = call->conn;
 
+	call->peer->cong_cwnd = call->cong_cwnd;
+
 	spin_lock_bh(&conn->params.peer->lock);
 	hlist_del_init(&call->error_link);
 	spin_unlock_bh(&conn->params.peer->lock);
@@ -228,6 +228,13 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 		seqlock_init(&peer->service_conn_lock);
 		spin_lock_init(&peer->lock);
 		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
+
+		if (RXRPC_TX_SMSS > 2190)
+			peer->cong_cwnd = 2;
+		else if (RXRPC_TX_SMSS > 1095)
+			peer->cong_cwnd = 3;
+		else
+			peer->cong_cwnd = 4;
 	}
 
 	_leave(" = %p", peer);