Commit ea2dfb37 authored by Vlad Yasevich

SCTP: properly clean up fragment and ordering queues during FWD-TSN.

When we receive a FWD-TSN (meaning the peer has abandoned the data),
we need to clean up any partially received messages that may be
hanging out on the re-assembly or re-ordering queues.  This is
a MUST requirement that was not properly done before.
Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
parent b07d68b5
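
The decision that drives the new reassembly-queue flush is a TSN comparison against the new cumulative TSN point: every fragment at or before that point belongs to a message the sender has abandoned and can be freed. The stand-alone sketch below is only an illustration (hypothetical user-space code, a plain array instead of an sk_buff queue); tsn_lte() here is a stand-in for the kernel's TSN_lte() serial-number comparison, which tolerates 32-bit wraparound.

/* Illustrative sketch only, not the kernel code: drop every queued
 * fragment whose TSN is at or before the Forward TSN's cumulative
 * TSN point.  tsn_lte() mimics serial-number (mod 2^32) comparison.
 */
#include <stdint.h>
#include <stdio.h>

static int tsn_lte(uint32_t a, uint32_t b)
{
	return a == b || (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t reasm[] = { 4294967290u, 4294967295u, 2u, 7u }; /* queued fragment TSNs */
	uint32_t fwd_tsn = 3;            /* new cumulative TSN point from the FWD-TSN */

	for (size_t i = 0; i < sizeof(reasm) / sizeof(reasm[0]); i++) {
		if (tsn_lte(reasm[i], fwd_tsn))
			printf("drop fragment, TSN %u\n", (unsigned)reasm[i]);
		else
			printf("keep fragment, TSN %u\n", (unsigned)reasm[i]);
	}
	return 0;
}

Note how the two TSNs just below the 32-bit wrap point still compare as "earlier than" 3, which is exactly why a plain unsigned comparison cannot be used here.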
@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 
 #endif /* __sctp_ulpqueue_h__ */
@@ -1130,6 +1130,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		/* Move the Cumulative TSN Ack ahead. */
 		sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+		/* purge the fragmentation queue */
+		sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
 		/* Abort any in progress partial delivery. */
 		sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 		break;
@@ -659,6 +659,46 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 	return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less than
+		 * or equal to ctsn_point.
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		csid = cevent->stream;
 		cssn = cevent->ssn;
 
-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far? */
+		if (csid > sid)
 			break;
 
-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough? */
+		if (csid < sid)
+			continue;
+
+		/* See if this SSN has been marked by skipping. */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;
 
 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on. */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event. */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event. */
+		__skb_queue_tail(&temp, pos);
 	}
 
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* See if we have more ordered data that we can deliver. */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN.  This is used during the processing of a
+ * Forward TSN chunk to skip over the abandoned ordered data.
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }
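
On the ordered side, the reap of the lobby is now stream-aware: after sctp_ulpq_skip() advances the next expected SSN for the abandoned stream, only chunks of that stream whose SSN already falls below the new expectation are unlinked and delivered. The toy sketch below uses hypothetical names and plain arrays rather than the kernel's sk_buff lobby, but walks the same three checks that the patched loop performs.

/* Hypothetical stand-alone illustration of the stream-aware reap:
 * the lobby is kept sorted by (stream id, stream sequence number);
 * after a Forward TSN skips the abandoned SSNs of one stream, every
 * queued chunk of that stream with an SSN below the new "next
 * expected" value can be handed to the user.
 */
#include <stdint.h>
#include <stdio.h>

struct chunk {
	uint16_t sid;	/* stream identifier */
	uint16_t ssn;	/* stream sequence number */
};

static int ssn_lt(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;	/* serial arithmetic, like SSN_lt() */
}

int main(void)
{
	struct chunk lobby[] = { {1, 4}, {2, 9}, {2, 10}, {3, 0} }; /* sorted by sid, then ssn */
	uint16_t next_ssn[4] = { 0, 0, 0, 0 };	/* next expected SSN per stream */
	uint16_t sid = 2;

	/* A Forward TSN told us SSNs up to 10 on stream 2 are abandoned:
	 * "skip" ahead, then reap what has become deliverable.
	 */
	next_ssn[sid] = 11;

	for (size_t i = 0; i < sizeof(lobby) / sizeof(lobby[0]); i++) {
		if (lobby[i].sid > sid)		/* gone too far */
			break;
		if (lobby[i].sid < sid)		/* not far enough */
			continue;
		if (!ssn_lt(lobby[i].ssn, next_ssn[sid]))
			break;			/* still waiting on earlier SSNs */
		printf("deliver sid %u ssn %u\n",
		       (unsigned)lobby[i].sid, (unsigned)lobby[i].ssn);
	}
	return 0;
}

With this input the sketch delivers (2, 9) and (2, 10), skips stream 1 and stops at stream 3, mirroring the csid/cssn checks introduced by the patch.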