Commit 7667d445 authored by David S. Miller

Merge tag 'rxrpc-rewrite-20160930' of...

Merge tag 'rxrpc-rewrite-20160930' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: More fixes and adjustments

This set of patches contains some more fixes and adjustments:

 (1) Actually display the retransmission indication previously added to the
     tx_data trace.

 (2) Switch to Congestion Avoidance mode properly at cwnd==ssthresh rather
     than relying on detection during an overshoot and correction.

 (3) Reduce ssthresh to the peer's declared receive window.

 (4) The offset field in rxrpc_skb_priv can be dispensed with and the error
     field is no longer used.  Get rid of them.

 (5) Keep the call timeouts as ktimes rather than jiffies to make it easier
     to deal with RTT-based timeout values in future.  Rounding to jiffies
     is still necessary when the system timer is set.

 (6) Fix the call timer handling to avoid retriggering of expired timeout
     actions.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 32986b55 405dea1d
......@@ -280,11 +280,12 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->lose = lose;
),
TP_printk("c=%p DATA %08x q=%08x fl=%02x%s",
TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
__entry->call,
__entry->serial,
__entry->seq,
__entry->flags,
__entry->retrans ? " *RETRANS*" : "",
__entry->lose ? " *LOSE*" : "")
);
......@@ -452,17 +453,18 @@ TRACE_EVENT(rxrpc_rtt_rx,
TRACE_EVENT(rxrpc_timer,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
unsigned long now),
ktime_t now, unsigned long now_j),
TP_ARGS(call, why, now),
TP_ARGS(call, why, now, now_j),
TP_STRUCT__entry(
__field(struct rxrpc_call *, call )
__field(enum rxrpc_timer_trace, why )
__field(unsigned long, now )
__field(unsigned long, expire_at )
__field(unsigned long, ack_at )
__field(unsigned long, resend_at )
__field_struct(ktime_t, now )
__field_struct(ktime_t, expire_at )
__field_struct(ktime_t, ack_at )
__field_struct(ktime_t, resend_at )
__field(unsigned long, now_j )
__field(unsigned long, timer )
),
......@@ -473,17 +475,17 @@ TRACE_EVENT(rxrpc_timer,
__entry->expire_at = call->expire_at;
__entry->ack_at = call->ack_at;
__entry->resend_at = call->resend_at;
__entry->now_j = now_j;
__entry->timer = call->timer.expires;
),
TP_printk("c=%p %s now=%lx x=%ld a=%ld r=%ld t=%ld",
TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
__entry->call,
rxrpc_timer_traces[__entry->why],
__entry->now,
__entry->expire_at - __entry->now,
__entry->ack_at - __entry->now,
__entry->resend_at - __entry->now,
__entry->timer - __entry->now)
ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
__entry->timer - __entry->now_j)
);
TRACE_EVENT(rxrpc_rx_lose,
......
......@@ -144,9 +144,7 @@ struct rxrpc_skb_priv {
u8 nr_jumbo; /* Number of jumbo subpackets */
};
union {
unsigned int offset; /* offset into buffer of next read */
int remain; /* amount of space remaining for next write */
u32 error; /* network error code */
};
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
......@@ -466,9 +464,9 @@ struct rxrpc_call {
struct rxrpc_connection *conn; /* connection carrying call */
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
unsigned long ack_at; /* When deferred ACK needs to happen */
unsigned long resend_at; /* When next resend needs to happen */
unsigned long expire_at; /* When the call times out */
ktime_t ack_at; /* When deferred ACK needs to happen */
ktime_t resend_at; /* When next resend needs to happen */
ktime_t expire_at; /* When the call times out */
struct timer_list timer; /* Combined event timer */
struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
......@@ -807,7 +805,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
* call_event.c
*/
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace);
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
......
......@@ -24,29 +24,53 @@
/*
* Set the timer
*/
void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why)
void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
ktime_t now)
{
unsigned long t, now = jiffies;
unsigned long t_j, now_j = jiffies;
ktime_t t;
bool queue = false;
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE) {
t = call->expire_at;
if (time_before_eq(t, now))
if (!ktime_after(t, now))
goto out;
if (time_after(call->resend_at, now) &&
time_before(call->resend_at, t))
if (!ktime_after(call->resend_at, now)) {
call->resend_at = call->expire_at;
if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
queue = true;
} else if (ktime_before(call->resend_at, t)) {
t = call->resend_at;
}
if (time_after(call->ack_at, now) &&
time_before(call->ack_at, t))
if (!ktime_after(call->ack_at, now)) {
call->ack_at = call->expire_at;
if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
queue = true;
} else if (ktime_before(call->ack_at, t)) {
t = call->ack_at;
}
if (call->timer.expires != t || !timer_pending(&call->timer)) {
mod_timer(&call->timer, t);
trace_rxrpc_timer(call, why, now);
t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
t_j += jiffies;
/* We have to make sure that the calculated jiffies value falls
* at or after the nsec value, or we may loop ceaselessly
* because the timer times out, but we haven't reached the nsec
* timeout yet.
*/
t_j++;
if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
mod_timer(&call->timer, t_j);
trace_rxrpc_timer(call, why, now, now_j);
}
if (queue)
rxrpc_queue_call(call);
}
out:
......@@ -62,7 +86,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
enum rxrpc_propose_ack_trace why)
{
enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay;
unsigned int expiry = rxrpc_soft_ack_delay;
ktime_t now, ack_at;
s8 prior = rxrpc_ack_priority[ack_reason];
/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
......@@ -111,7 +136,6 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
break;
}
now = jiffies;
if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
_debug("already scheduled");
} else if (immediate || expiry == 0) {
......@@ -120,11 +144,11 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
background)
rxrpc_queue_call(call);
} else {
ack_at = now + expiry;
_debug("deferred ACK %ld < %ld", expiry, call->ack_at - now);
if (time_before(ack_at, call->ack_at)) {
now = ktime_get_real();
ack_at = ktime_add_ms(now, expiry);
if (ktime_before(ack_at, call->ack_at)) {
call->ack_at = ack_at;
rxrpc_set_timer(call, rxrpc_timer_set_for_ack);
rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
}
}
......@@ -157,12 +181,12 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
/*
* Perform retransmission of NAK'd and unack'd packets.
*/
static void rxrpc_resend(struct rxrpc_call *call)
static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
rxrpc_seq_t cursor, seq, top;
ktime_t now = ktime_get_real(), max_age, oldest, resend_at, ack_ts;
ktime_t max_age, oldest, ack_ts;
int ix;
u8 annotation, anno_type, retrans = 0, unacked = 0;
......@@ -212,14 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}
resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
call->resend_at = jiffies +
nsecs_to_jiffies(ktime_to_ns(ktime_sub(resend_at, now))) +
1; /* We have to make sure that the calculated jiffies value
* falls at or after the nsec value, or we shall loop
* ceaselessly because the timer times out, but we haven't
* reached the nsec timeout yet.
*/
call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
if (unacked)
rxrpc_congestion_timeout(call);
......@@ -229,7 +246,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
* retransmitting data.
*/
if (!retrans) {
rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_ns(ack_ts) < call->peer->rtt)
......@@ -301,7 +318,7 @@ void rxrpc_process_call(struct work_struct *work)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
unsigned long now;
ktime_t now;
rxrpc_see_call(call);
......@@ -320,15 +337,14 @@ void rxrpc_process_call(struct work_struct *work)
goto out_put;
}
now = jiffies;
if (time_after_eq(now, call->expire_at)) {
now = ktime_get_real();
if (ktime_before(call->expire_at, now)) {
rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
goto recheck_state;
}
if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
time_after_eq(now, call->ack_at)) {
if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
call->ack_at = call->expire_at;
if (call->ackr_reason) {
rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
......@@ -336,13 +352,12 @@ void rxrpc_process_call(struct work_struct *work)
}
}
if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) ||
time_after_eq(now, call->resend_at)) {
rxrpc_resend(call);
if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
rxrpc_resend(call, now);
goto recheck_state;
}
rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
/* other events may have been raised since we started checking */
if (call->events && call->state < RXRPC_CALL_COMPLETE) {
......
......@@ -19,11 +19,6 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
* Maximum lifetime of a call (in jiffies).
*/
unsigned int rxrpc_max_call_lifetime = 60 * HZ;
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_UNINITIALISED] = "Uninit ",
[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
......@@ -76,10 +71,8 @@ static void rxrpc_call_timer_expired(unsigned long _call)
_enter("%d", call->debug_id);
if (call->state < RXRPC_CALL_COMPLETE) {
trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
rxrpc_queue_call(call);
}
if (call->state < RXRPC_CALL_COMPLETE)
rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
}
/*
......@@ -207,14 +200,14 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
*/
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
unsigned long expire_at;
ktime_t now = ktime_get_real(), expire_at;
expire_at = jiffies + rxrpc_max_call_lifetime;
expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
call->expire_at = expire_at;
call->ack_at = expire_at;
call->resend_at = expire_at;
call->timer.expires = expire_at + 1;
rxrpc_set_timer(call, rxrpc_timer_begin);
call->timer.expires = jiffies + LONG_MAX / 2;
rxrpc_set_timer(call, rxrpc_timer_begin, now);
}
/*
......
......@@ -276,7 +276,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return 0;
case RXRPC_PACKET_TYPE_ABORT:
if (skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) < 0)
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&wtmp, sizeof(wtmp)) < 0)
return -EPROTO;
abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
......
......@@ -57,7 +57,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
call->cong_ssthresh = max_t(unsigned int,
summary->flight_size / 2, 2);
cwnd = 1;
if (cwnd > call->cong_ssthresh &&
if (cwnd >= call->cong_ssthresh &&
call->cong_mode == RXRPC_CALL_SLOW_START) {
call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
call->cong_tstamp = skb->tstamp;
......@@ -82,7 +82,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
goto packet_loss_detected;
if (summary->cumulative_acks > 0)
cwnd += 1;
if (cwnd > call->cong_ssthresh) {
if (cwnd >= call->cong_ssthresh) {
call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
call->cong_tstamp = skb->tstamp;
}
......@@ -161,7 +161,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
call->cong_dup_acks = 0;
call->cong_extra = 0;
call->cong_tstamp = skb->tstamp;
if (cwnd <= call->cong_ssthresh)
if (cwnd < call->cong_ssthresh)
call->cong_mode = RXRPC_CALL_SLOW_START;
else
call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
......@@ -328,7 +328,8 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
call->resend_at = call->expire_at;
call->ack_at = call->expire_at;
spin_unlock_bh(&call->lock);
rxrpc_set_timer(call, rxrpc_timer_init_for_reply);
rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
ktime_get_real());
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
......@@ -358,7 +359,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
static bool rxrpc_validate_jumbo(struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int offset = sp->offset;
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int len = skb->len;
int nr_jumbo = 1;
u8 flags = sp->hdr.flags;
......@@ -419,7 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
u16 skew)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int offset = sp->offset;
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int ix;
rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
......@@ -658,6 +659,8 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
rwind = RXRPC_RXTX_BUFF_SIZE - 1;
call->tx_winsize = rwind;
if (call->cong_ssthresh > rwind)
call->cong_ssthresh = rwind;
mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
......@@ -744,15 +747,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
} buf;
rxrpc_serial_t acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack;
int nr_acks, offset;
int nr_acks, offset, ioffset;
_enter("");
if (skb_copy_bits(skb, sp->offset, &buf.ack, sizeof(buf.ack)) < 0) {
offset = sizeof(struct rxrpc_wire_header);
if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
_debug("extraction failure");
return rxrpc_proto_abort("XAK", call, 0);
}
sp->offset += sizeof(buf.ack);
offset += sizeof(buf.ack);
acked_serial = ntohl(buf.ack.serial);
first_soft_ack = ntohl(buf.ack.firstPacket);
......@@ -790,9 +794,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_propose_ack_respond_to_ack);
}
offset = sp->offset + nr_acks + 3;
if (skb->len >= offset + sizeof(buf.info)) {
if (skb_copy_bits(skb, offset, &buf.info, sizeof(buf.info)) < 0)
ioffset = offset + nr_acks + 3;
if (skb->len >= ioffset + sizeof(buf.info)) {
if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
return rxrpc_proto_abort("XAI", call, 0);
rxrpc_input_ackinfo(call, skb, &buf.info);
}
......@@ -830,7 +834,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_rotate_tx_window(call, hard_ack, &summary);
if (nr_acks > 0) {
if (skb_copy_bits(skb, sp->offset, buf.acks, nr_acks) < 0)
if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
return rxrpc_proto_abort("XSA", call, 0);
rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
&summary);
......@@ -878,7 +882,8 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
_enter("");
if (skb->len >= 4 &&
skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) >= 0)
skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&wtmp, sizeof(wtmp)) >= 0)
abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
......@@ -994,7 +999,6 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
sp->hdr.securityIndex = whdr.securityIndex;
sp->hdr._rsvd = ntohs(whdr._rsvd);
sp->hdr.serviceId = ntohs(whdr.serviceId);
sp->offset = sizeof(whdr);
return 0;
}
......
......@@ -95,7 +95,8 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
if (skb_copy_bits(skb, sp->offset, &v, 1) < 0)
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&v, 1) < 0)
return;
_proto("Rx VERSION { %02x }", v);
if (v == 0)
......
......@@ -20,29 +20,34 @@
*/
unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
* Maximum lifetime of a call (in ms).
*/
unsigned int rxrpc_max_call_lifetime = 60 * 1000;
/*
* How long to wait before scheduling ACK generation after seeing a
* packet with RXRPC_REQUEST_ACK set (in jiffies).
* packet with RXRPC_REQUEST_ACK set (in ms).
*/
unsigned int rxrpc_requested_ack_delay = 1;
/*
* How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
* How long to wait before scheduling an ACK with subtype DELAY (in ms).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
unsigned int rxrpc_soft_ack_delay = 1 * HZ;
unsigned int rxrpc_soft_ack_delay = 1 * 1000;
/*
* How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
* How long to wait before scheduling an ACK with subtype IDLE (in ms).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
* ACK let the other end know that it can free up its Tx buffer space.
*/
unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
/*
* Receive window size in packets. This indicates the maximum number of
......
......@@ -261,15 +261,13 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
u8 *_annotation,
unsigned int *_offset, unsigned int *_len)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned int offset = *_offset;
unsigned int offset = sizeof(struct rxrpc_wire_header);
unsigned int len = *_len;
int ret;
u8 annotation = *_annotation;
/* Locate the subpacket */
offset = sp->offset;
len = skb->len - sp->offset;
len = skb->len - offset;
if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
RXRPC_JUMBO_SUBPKTLEN);
......
......@@ -771,7 +771,8 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
}
abort_code = RXKADPACKETSHORT;
if (skb_copy_bits(skb, sp->offset, &challenge, sizeof(challenge)) < 0)
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&challenge, sizeof(challenge)) < 0)
goto protocol_error;
version = ntohl(challenge.version);
......@@ -1028,7 +1029,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
_enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
abort_code = RXKADPACKETSHORT;
if (skb_copy_bits(skb, sp->offset, &response, sizeof(response)) < 0)
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&response, sizeof(response)) < 0)
goto protocol_error;
if (!pskb_pull(skb, sizeof(response)))
BUG();
......@@ -1057,7 +1059,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
return -ENOMEM;
abort_code = RXKADPACKETSHORT;
if (skb_copy_bits(skb, sp->offset, ticket, ticket_len) < 0)
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
ticket, ticket_len) < 0)
goto protocol_error_free;
ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
......
......@@ -149,13 +149,13 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
_debug("need instant resend %d", ret);
rxrpc_instant_resend(call, ix);
} else {
unsigned long resend_at;
ktime_t now = ktime_get_real(), resend_at;
resend_at = jiffies + msecs_to_jiffies(rxrpc_resend_timeout);
resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
if (time_before(resend_at, call->resend_at)) {
if (ktime_before(resend_at, call->resend_at)) {
call->resend_at = resend_at;
rxrpc_set_timer(call, rxrpc_timer_set_for_send);
rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
}
}
......
......@@ -35,7 +35,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_requested_ack_delay,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
.proc_handler = proc_dointvec,
.extra1 = (void *)&zero,
},
{
......@@ -43,7 +43,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
.proc_handler = proc_dointvec,
.extra1 = (void *)&one,
},
{
......@@ -51,7 +51,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_idle_ack_delay,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
.proc_handler = proc_dointvec,
.extra1 = (void *)&one,
},
{
......@@ -85,7 +85,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_max_call_lifetime,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
.proc_handler = proc_dointvec,
.extra1 = (void *)&one,
},
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment