Commit b9622ed4 authored by David S. Miller

Merge branch 'sctp-stream-interleave-part-1'

Xin Long says:

====================
sctp: Implement Stream Interleave: The I-DATA Chunk Supporting User Message Interleaving

Stream Interleave will be implemented in two parts:

   1. The I-DATA Chunk Supporting User Message Interleaving
   2. Interaction with Other SCTP Extensions

Overview in section 1.1 of RFC8260 for Part 1:

   This document describes a new chunk carrying payload data called
   I-DATA.  This chunk incorporates the properties of the current SCTP
   DATA chunk, all the flags and fields except the Stream Sequence
   Number (SSN), and also adds two new fields in its chunk header -- the
   Fragment Sequence Number (FSN) and the Message Identifier (MID).  The
   FSN is only used for reassembling all fragments that have the same
   MID and the same ordering property.  The TSN is only used for the
   reliable transfer in combination with Selective Acknowledgment (SACK)
   chunks.

   In addition, the MID is also used for ensuring ordered delivery
   instead of using the stream sequence number (the I-DATA chunk omits
   an SSN).
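
As a standalone sketch (not kernel code): the MID and FSN are 32-bit serial
numbers, so ordering comparisons use signed wrap-around arithmetic, mirroring
the MID_lt() macro added later in this series.

    #include <stdint.h>
    #include <stdio.h>

    /* a precedes b, even across the 2^32 wrap */
    static int mid_lt(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
        printf("%d\n", mid_lt(1, 2));               /* 1: 1 < 2 */
        printf("%d\n", mid_lt(0xfffffffeu, 1));     /* 1: still "before" after wrap */
        return 0;
    }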

As the first part of the Stream Interleave implementation, this patchset adds
an ops framework named sctp_stream_interleave: a set of operations covering
everything that must differ between DATA and I-DATA chunk processing.

Then it defines sctp_stream_interleave_0 to work for normal DATA chunks
and sctp_stream_interleave_1 for I-DATA chunks.

With these operations, hundreds of if-else checks for the different handling
of I-DATA chunks are avoided; besides, very little code could be shared
between the two function sets anyway.
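
A standalone illustration (not kernel code, names simplified) of the ops-table
pattern this series introduces: one ops struct per chunk format, selected once
at stream init, so data-path callers never branch on DATA vs I-DATA. The sizes
16 and 20 follow from the sctp_data_chunk and sctp_idata_chunk layouts in the
diff below.

    #include <stdio.h>

    struct stream_interleave_ops {
        unsigned short data_chunk_len;
        const char *(*chunk_name)(void);    /* stand-in for make_datafrag etc. */
    };

    static const char *data_name(void)  { return "DATA";   }
    static const char *idata_name(void) { return "I-DATA"; }

    static const struct stream_interleave_ops ops_0 = { 16, data_name  };
    static const struct stream_interleave_ops ops_1 = { 20, idata_name };

    int main(void)
    {
        int intl_enable = 1;    /* would come from the association */
        const struct stream_interleave_ops *si = intl_enable ? &ops_1 : &ops_0;

        /* callers just use si->..., as the kernel uses asoc->stream.si->... */
        printf("%s chunk header length: %u bytes\n",
               si->chunk_name(), si->data_chunk_len);
        return 0;
    }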

This patchset first adds some basic variables, structures and socket options,
then implements these operations one by one to build up the procedures for
ordered idata, and finally adjusts some code to make them work for unordered
idata as well.

To keep the change safe and avoid breaking normal DATA chunk processing, this
feature cannot be enabled for use until all the stream interleave code is
complete.
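
For context, a hypothetical userspace sketch (not part of this patchset) of
how the feature is enabled once complete: per sctp_setsockopt_interleaving_supported()
in the diff below, the netns intl_enable flag must already be set,
SCTP_FRAGMENT_INTERLEAVE must be non-zero, and the new option only applies
socket-wide (assoc_id == 0). The helper name enable_interleaving is made up
for illustration.

    #include <errno.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    #ifndef SCTP_INTERLEAVING_SUPPORTED
    #define SCTP_INTERLEAVING_SUPPORTED 125  /* from this patchset's uapi change */
    #endif

    int enable_interleaving(int fd)
    {
        struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1 };
        int frag = 2;   /* fragment interleave level 2 */

        if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
                       &frag, sizeof(frag)) < 0)
            return -errno;

        if (setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
                       &av, sizeof(av)) < 0)
            return -errno;  /* EPERM if intl_enable/frag_interleave unset */

        return 0;
    }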

v1 -> v2:
  - fixed a checkpatch warning about a missing blank line.
  - avoided a kbuild warning reported by gcc-4.9.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 92ff4264 13228238
@@ -102,6 +102,9 @@ enum sctp_cid {
    /* AUTH Extension Section 4.1 */
    SCTP_CID_AUTH       = 0x0F,

+   /* sctp ndata 5.1. I-DATA */
+   SCTP_CID_I_DATA     = 0x40,
+
    /* PR-SCTP Sec 3.2 */
    SCTP_CID_FWD_TSN    = 0xC0,
@@ -240,6 +243,23 @@ struct sctp_data_chunk {
    struct sctp_datahdr data_hdr;
};

+struct sctp_idatahdr {
+   __be32 tsn;
+   __be16 stream;
+   __be16 reserved;
+   __be32 mid;
+   union {
+       __u32 ppid;
+       __be32 fsn;
+   };
+   __u8 payload[0];
+};
+
+struct sctp_idata_chunk {
+   struct sctp_chunkhdr chunk_hdr;
+   struct sctp_idatahdr data_hdr;
+};
+
/* DATA Chuck Specific Flags */
enum {
    SCTP_DATA_MIDDLE_FRAG   = 0x00,
......
@@ -122,9 +122,12 @@ struct netns_sctp {
    /* Flag to indicate if PR-CONFIG is enabled. */
    int reconf_enable;

-   /* Flag to idicate if SCTP-AUTH is enabled */
+   /* Flag to indicate if SCTP-AUTH is enabled */
    int auth_enable;

+   /* Flag to indicate if stream interleave is enabled */
+   int intl_enable;
+
    /*
     * Policy to control SCTP IPv4 address scoping
     * 0   - Disable IPv4 address scoping
......
@@ -145,12 +145,13 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other)
SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)

-#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
+#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
+                               a->chunk_hdr->type == SCTP_CID_I_DATA)

/* Calculate the actual data size in a data chunk */
-#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) \
-                                   - (unsigned long)(c->chunk_hdr) \
-                                   - sizeof(struct sctp_data_chunk)))
+#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
+                                    (unsigned long)(c->chunk_hdr) - \
+                                    sctp_datachk_len(&c->asoc->stream)))

/* Internal error codes */
enum sctp_ierror {
......
@@ -444,13 +444,13 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
    int frag = pmtu;

    frag -= sp->pf->af->net_header_len;
-   frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
+   frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);

    if (asoc->user_frag)
        frag = min_t(int, frag, asoc->user_frag);

    frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
-                                       sizeof(struct sctp_data_chunk)));
+                                       sctp_datachk_len(&asoc->stream)));

    return frag;
}
......
@@ -197,10 +197,11 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
                                 const __u32 lowest_tsn,
                                 const struct sctp_chunk *chunk);
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+                                   __u8 flags, int paylen, gfp_t gfp);
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
                                            const struct sctp_sndrcvinfo *sinfo,
-                                           int len, const __u8 flags,
-                                           __u16 ssn, gfp_t gfp);
+                                           int len, __u8 flags, gfp_t gfp);
struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
                                  const __u32 lowest_tsn);
struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
@@ -342,7 +343,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
    __u16 size;

    size = ntohs(chunk->chunk_hdr->length);
-   size -= sizeof(struct sctp_data_chunk);
+   size -= sctp_datahdr_len(&chunk->asoc->stream);

    return size;
}
@@ -358,6 +359,12 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
     typecheck(__u32, b) && \
     ((__s32)((a) - (b)) <= 0))

+/* Compare two MIDs */
+#define MID_lt(a, b)    \
+   (typecheck(__u32, a) && \
+    typecheck(__u32, b) && \
+    ((__s32)((a) - (b)) < 0))
+
/* Compare two SSNs */
#define SSN_lt(a,b)     \
    (typecheck(__u16, a) && \
......
/* SCTP kernel implementation
* (C) Copyright Red Hat Inc. 2017
*
* These are definitions used by the stream schedulers, defined in RFC
* draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Xin Long <lucien.xin@gmail.com>
*/
#ifndef __sctp_stream_interleave_h__
#define __sctp_stream_interleave_h__
struct sctp_stream_interleave {
__u16 data_chunk_len;
/* (I-)DATA process */
struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int len, __u8 flags, gfp_t gfp);
void (*assign_number)(struct sctp_chunk *chunk);
bool (*validate_data)(struct sctp_chunk *chunk);
int (*ulpevent_data)(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk, gfp_t gfp);
int (*enqueue_event)(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event);
void (*renege_events)(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk, gfp_t gfp);
void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
};
void sctp_stream_interleave_init(struct sctp_stream *stream);
#endif /* __sctp_stream_interleave_h__ */
@@ -89,6 +89,7 @@ struct sctp_stream;
#include <net/sctp/tsnmap.h>
#include <net/sctp/ulpevent.h>
#include <net/sctp/ulpqueue.h>
+#include <net/sctp/stream_interleave.h>

/* Structures useful for managing bind/connect. */
@@ -217,6 +218,7 @@ struct sctp_sock {
        disable_fragments:1,
        v4mapped:1,
        frag_interleave:1,
+       strm_interleave:1,
        recvrcvinfo:1,
        recvnxtinfo:1,
        data_ready_signalled:1;
@@ -397,6 +399,28 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
#define sctp_ssn_skip(stream, type, sid, ssn) \
    ((stream)->type[sid].ssn = ssn + 1)

+/* What is the current MID number for this stream? */
+#define sctp_mid_peek(stream, type, sid) \
+   ((stream)->type[sid].mid)
+
+/* Return the next MID number for this stream. */
+#define sctp_mid_next(stream, type, sid) \
+   ((stream)->type[sid].mid++)
+
+/* Skip over this mid and all below. */
+#define sctp_mid_skip(stream, type, sid, mid) \
+   ((stream)->type[sid].mid = mid + 1)
+
+#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
+
+/* What is the current MID_uo number for this stream? */
+#define sctp_mid_uo_peek(stream, type, sid) \
+   ((stream)->type[sid].mid_uo)
+
+/* Return the next MID_uo number for this stream. */
+#define sctp_mid_uo_next(stream, type, sid) \
+   ((stream)->type[sid].mid_uo++)
+
/*
 * Pointers to address related SCTP functions.
 * (i.e. things that depend on the address family.)
@@ -574,6 +598,7 @@ struct sctp_chunk {
        struct sctp_addiphdr *addip_hdr;
        struct sctp_fwdtsn_hdr *fwdtsn_hdr;
        struct sctp_authhdr *auth_hdr;
+       struct sctp_idatahdr *idata_hdr;
    } subh;

    __u8 *chunk_end;
@@ -620,6 +645,7 @@ struct sctp_chunk {
    __u16 rtt_in_progress:1,    /* This chunk used for RTT calc? */
        has_tsn:1,              /* Does this chunk have a TSN yet? */
        has_ssn:1,              /* Does this chunk have a SSN yet? */
+#define has_mid has_ssn
        singleton:1,            /* Only chunk in the packet? */
        end_of_packet:1,        /* Last chunk in the packet? */
        ecn_ce_done:1,          /* Have we processed the ECN CE bit? */
@@ -1357,13 +1383,25 @@ struct sctp_stream_out_ext {
};

struct sctp_stream_out {
-   __u16 ssn;
-   __u8 state;
+   union {
+       __u32 mid;
+       __u16 ssn;
+   };
+   __u32 mid_uo;
    struct sctp_stream_out_ext *ext;
+   __u8 state;
};

struct sctp_stream_in {
-   __u16 ssn;
+   union {
+       __u32 mid;
+       __u16 ssn;
+   };
+   __u32 mid_uo;
+   __u32 fsn;
+   __u32 fsn_uo;
+   char pd_mode;
+   char pd_mode_uo;
};

struct sctp_stream {
@@ -1387,11 +1425,22 @@ struct sctp_stream {
            struct sctp_stream_out_ext *rr_next;
        };
    };
+   struct sctp_stream_interleave *si;
};

#define SCTP_STREAM_CLOSED      0x00
#define SCTP_STREAM_OPEN        0x01

+static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
+{
+   return stream->si->data_chunk_len;
+}
+
+static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
+{
+   return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
/* SCTP_GET_ASSOC_STATS counters */
struct sctp_priv_assoc_stats {
    /* Maximum observed rto in the association during subsequent
@@ -1940,6 +1989,7 @@ struct sctp_association {
    __u8    need_ecne:1,    /* Need to send an ECNE Chunk? */
        temp:1,             /* Is it a temporary association? */
        force_delay:1,
+       intl_enable:1,
        prsctp_enable:1,
        reconf_enable:1;
......
@@ -45,19 +45,29 @@
/* A structure to carry information to the ULP (e.g. Sockets API) */
/* Warning: This sits inside an skb.cb[] area. Be very careful of
 * growing this structure as it is at the maximum limit now.
+ *
+ * sctp_ulpevent is saved in sk->cb(48 bytes), whose last 4 bytes
+ * have been taken by sock_skb_cb, So here it has to use 'packed'
+ * to make sctp_ulpevent fit into the rest 44 bytes.
 */
struct sctp_ulpevent {
    struct sctp_association *asoc;
    struct sctp_chunk *chunk;
    unsigned int rmem_len;
+   union {
+       __u32 mid;
+       __u16 ssn;
+   };
+   union {
        __u32 ppid;
+       __u32 fsn;
+   };
    __u32 tsn;
    __u32 cumtsn;
    __u16 stream;
-   __u16 ssn;
    __u16 flags;
    __u16 msg_flags;
-};
+} __packed;

/* Retrieve the skb this event sits inside of. */
static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
@@ -112,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
                    const struct sctp_association *asoc,
-                   __u32 indication, gfp_t gfp);
+                   __u32 indication, __u32 sid, __u32 seq,
+                   __u32 flags, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
    const struct sctp_association *asoc, gfp_t gfp);
@@ -140,6 +151,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
    const struct sctp_association *asoc, __u16 flags,
    __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);

+struct sctp_ulpevent *sctp_make_reassembled_event(
+                   struct net *net, struct sk_buff_head *queue,
+                   struct sk_buff *f_frag, struct sk_buff *l_frag);
+
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
                                   struct msghdr *);
void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
......
@@ -45,6 +45,7 @@ struct sctp_ulpq {
    char pd_mode;
    struct sctp_association *asoc;
    struct sk_buff_head reasm;
+   struct sk_buff_head reasm_uo;
    struct sk_buff_head lobby;
};
@@ -76,11 +77,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);

+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+                            struct sk_buff_head *list, __u16 needed);
+
#endif /* __sctp_ulpqueue_h__ */
@@ -125,6 +125,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_SOCKOPT_PEELOFF_FLAGS  122
#define SCTP_STREAM_SCHEDULER       123
#define SCTP_STREAM_SCHEDULER_VALUE 124
+#define SCTP_INTERLEAVING_SUPPORTED 125

/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE   0x0000
@@ -459,6 +460,8 @@ struct sctp_pdapi_event {
    __u32 pdapi_length;
    __u32 pdapi_indication;
    sctp_assoc_t pdapi_assoc_id;
+   __u32 pdapi_stream;
+   __u32 pdapi_seq;
};

enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
......
@@ -14,7 +14,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
      tsnmap.o bind_addr.o socket.o primitive.o \
      output.o input.o debug.o stream.o auth.o \
      offload.o stream_sched.o stream_sched_prio.o \
-     stream_sched_rr.o
+     stream_sched_rr.o stream_interleave.o

sctp_probe-y := probe.o
......
@@ -861,7 +861,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
        event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
                    0, spc_state, error, GFP_ATOMIC);
        if (event)
-           sctp_ulpq_tail_event(&asoc->ulpq, event);
+           asoc->stream.si->enqueue_event(&asoc->ulpq, event);
    }

    /* Select new active and retran paths. */
......
@@ -124,7 +124,7 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
            ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
                                                error, GFP_ATOMIC);
            if (ev)
-               sctp_ulpq_tail_event(&asoc->ulpq, ev);
+               asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
        }

        sctp_chunk_put(chunk);
@@ -191,7 +191,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
     */
    max_data = asoc->pathmtu -
           sctp_sk(asoc->base.sk)->pf->af->net_header_len -
-          sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+          sizeof(struct sctphdr) - sctp_datachk_len(&asoc->stream);
    max_data = SCTP_TRUNC4(max_data);

    /* If the the peer requested that we authenticate DATA chunks
@@ -264,8 +264,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
            frag |= SCTP_DATA_SACK_IMM;
        }

-       chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
-                                        0, GFP_KERNEL);
+       chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
+                                              GFP_KERNEL);
        if (!chunk) {
            err = -ENOMEM;
            goto errout;
......
@@ -313,6 +313,7 @@ static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
    /* We believe that this chunk is OK to add to the packet */
    switch (chunk->chunk_hdr->type) {
    case SCTP_CID_DATA:
+   case SCTP_CID_I_DATA:
        /* Account for the data being in the packet */
        sctp_packet_append_data(packet, chunk);
        /* Disallow SACK bundling after DATA. */
@@ -724,7 +725,7 @@ static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
     * or delay in hopes of bundling a full sized packet.
     */
    if (chunk->skb->len + q->out_qlen > transport->pathmtu -
-       packet->overhead - sizeof(struct sctp_data_chunk) - 4)
+       packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
        /* Enough data queued to fill a packet */
        return SCTP_XMIT_OK;
@@ -759,7 +760,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
        asoc->peer.rwnd = rwnd;

    sctp_chunk_assign_tsn(chunk);
-   sctp_chunk_assign_ssn(chunk);
+   asoc->stream.si->assign_number(chunk);
}

static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
......
@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
    struct sctp_inithdr init;
    union sctp_params addrs;
    struct sctp_sock *sp;
-   __u8 extensions[4];
+   __u8 extensions[5];
    size_t chunksize;
    __be16 types[2];
    int num_ext = 0;
@@ -278,6 +278,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
    if (sp->adaptation_ind)
        chunksize += sizeof(aiparam);

+   if (sp->strm_interleave) {
+       extensions[num_ext] = SCTP_CID_I_DATA;
+       num_ext += 1;
+   }
+
    chunksize += vparam_len;

    /* Account for AUTH related parameters */
@@ -392,7 +397,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
    struct sctp_inithdr initack;
    union sctp_params addrs;
    struct sctp_sock *sp;
-   __u8 extensions[4];
+   __u8 extensions[5];
    size_t chunksize;
    int num_ext = 0;
    int cookie_len;
@@ -442,6 +447,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
    if (sp->adaptation_ind)
        chunksize += sizeof(aiparam);

+   if (asoc->intl_enable) {
+       extensions[num_ext] = SCTP_CID_I_DATA;
+       num_ext += 1;
+   }
+
    if (asoc->peer.auth_capable) {
        auth_random = (struct sctp_paramhdr *)asoc->c.auth_random;
        chunksize += ntohs(auth_random->length);
@@ -711,38 +721,31 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
/* Make a DATA chunk for the given association from the provided
 * parameters. However, do not populate the data payload.
 */
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
                                            const struct sctp_sndrcvinfo *sinfo,
-                                           int data_len, __u8 flags, __u16 ssn,
-                                           gfp_t gfp)
+                                           int len, __u8 flags, gfp_t gfp)
{
    struct sctp_chunk *retval;
    struct sctp_datahdr dp;
-   int chunk_len;

    /* We assign the TSN as LATE as possible, not here when
     * creating the chunk.
     */
-   dp.tsn = 0;
-   dp.stream = htons(sinfo->sinfo_stream);
+   memset(&dp, 0, sizeof(dp));
    dp.ppid = sinfo->sinfo_ppid;
+   dp.stream = htons(sinfo->sinfo_stream);

    /* Set the flags for an unordered send. */
-   if (sinfo->sinfo_flags & SCTP_UNORDERED) {
+   if (sinfo->sinfo_flags & SCTP_UNORDERED)
        flags |= SCTP_DATA_UNORDERED;
-       dp.ssn = 0;
-   } else
-       dp.ssn = htons(ssn);

-   chunk_len = sizeof(dp) + data_len;
-   retval = sctp_make_data(asoc, flags, chunk_len, gfp);
+   retval = sctp_make_data(asoc, flags, sizeof(dp) + len, gfp);
    if (!retval)
-       goto nodata;
+       return NULL;

    retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
    memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

-nodata:
    return retval;
}
@@ -1415,6 +1418,12 @@ static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
    return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen, gfp);
}

+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+                                   __u8 flags, int paylen, gfp_t gfp)
+{
+   return _sctp_make_chunk(asoc, SCTP_CID_I_DATA, flags, paylen, gfp);
+}
+
static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
                                            __u8 type, __u8 flags, int paylen,
                                            gfp_t gfp)
@@ -2032,6 +2041,10 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
            if (net->sctp.addip_enable)
                asoc->peer.asconf_capable = 1;
            break;
+       case SCTP_CID_I_DATA:
+           if (sctp_sk(asoc->base.sk)->strm_interleave)
+               asoc->intl_enable = 1;
+           break;
        default:
            break;
        }
......
@@ -632,7 +632,7 @@ static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
    struct sctp_chunk *abort;

    /* Cancel any partial delivery in progress. */
-   sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+   asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);

    if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
        event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
@@ -972,7 +972,7 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
    if (!ev)
        return;

-   sctp_ulpq_tail_event(&asoc->ulpq, ev);
+   asoc->stream.si->enqueue_event(&asoc->ulpq, ev);

    switch (err_hdr->cause) {
    case SCTP_ERROR_UNKNOWN_CHUNK:
@@ -1058,7 +1058,7 @@ static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
                                         asoc->c.sinit_max_instreams,
                                         NULL, GFP_ATOMIC);
    if (ev)
-       sctp_ulpq_tail_event(&asoc->ulpq, ev);
+       asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
@@ -1070,7 +1070,7 @@ static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
    ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
    if (ev)
-       sctp_ulpq_tail_event(&asoc->ulpq, ev);
+       asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
@@ -1483,7 +1483,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
            pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
                     __func__, cmd->obj.chunk, &asoc->ulpq);

-           sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
-                               GFP_ATOMIC);
+           asoc->stream.si->ulpevent_data(&asoc->ulpq,
+                                          cmd->obj.chunk,
+                                          GFP_ATOMIC);
            break;
@@ -1492,7 +1493,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
            pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
                     __func__, cmd->obj.ulpevent, &asoc->ulpq);

-           sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
+           asoc->stream.si->enqueue_event(&asoc->ulpq,
+                                          cmd->obj.ulpevent);
            break;

        case SCTP_CMD_REPLY:
@@ -1729,11 +1731,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
            break;

        case SCTP_CMD_PART_DELIVER:
-           sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
+           asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
            break;

        case SCTP_CMD_RENEGE:
-           sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
-                            GFP_ATOMIC);
+           asoc->stream.si->renege_events(&asoc->ulpq,
+                                          cmd->obj.chunk,
+                                          GFP_ATOMIC);
            break;
......
@@ -3013,7 +3013,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
        return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
    }

-   if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
+   if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
        return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                          commands);
@@ -3034,7 +3034,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
    case SCTP_IERROR_PROTO_VIOLATION:
        return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
                                       (u8 *)chunk->subh.data_hdr,
-                                      sizeof(struct sctp_datahdr));
+                                      sctp_datahdr_len(&asoc->stream));
    default:
        BUG();
    }
@@ -3133,7 +3133,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
        return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
    }

-   if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
+   if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
        return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                          commands);
@@ -3150,7 +3150,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
    case SCTP_IERROR_PROTO_VIOLATION:
        return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
                                       (u8 *)chunk->subh.data_hdr,
-                                      sizeof(struct sctp_datahdr));
+                                      sctp_datahdr_len(&asoc->stream));
    default:
        BUG();
    }
@@ -6244,14 +6244,12 @@ static int sctp_eat_data(const struct sctp_association *asoc,
    struct sctp_chunk *err;
    enum sctp_verb deliver;
    size_t datalen;
-   u8 ordered = 0;
-   u16 ssn, sid;
    __u32 tsn;
    int tmp;

    data_hdr = (struct sctp_datahdr *)chunk->skb->data;
    chunk->subh.data_hdr = data_hdr;
-   skb_pull(chunk->skb, sizeof(*data_hdr));
+   skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));

    tsn = ntohl(data_hdr->tsn);
    pr_debug("%s: TSN 0x%x\n", __func__, tsn);
@@ -6299,7 +6297,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
     * Actually, allow a little bit of overflow (up to a MTU).
     */
    datalen = ntohs(chunk->chunk_hdr->length);
-   datalen -= sizeof(struct sctp_data_chunk);
+   datalen -= sctp_datachk_len(&asoc->stream);

    deliver = SCTP_CMD_CHUNK_ULP;
@@ -6394,7 +6392,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
        SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
        if (chunk->asoc)
            chunk->asoc->stats.iodchunks++;
-       ordered = 1;
    }

    /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
@@ -6405,8 +6402,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
     * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
     * and discard the DATA chunk.
     */
-   sid = ntohs(data_hdr->stream);
-   if (sid >= asoc->stream.incnt) {
+   if (ntohs(data_hdr->stream) >= asoc->stream.incnt) {
        /* Mark tsn as received even though we drop it */
        sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
@@ -6427,8 +6423,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
     * SSN is smaller then the next expected one. If it is, it wrapped
     * and is invalid.
     */
-   ssn = ntohs(data_hdr->ssn);
-   if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->stream, in, sid)))
+   if (!asoc->stream.si->validate_data(chunk))
        return SCTP_IERROR_PROTO_VIOLATION;

    /* Send the data up to the user. Note: Schedule the
......
@@ -985,6 +985,9 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
    if (state > SCTP_STATE_MAX)
        return &bug;

+   if (net->sctp.intl_enable && cid == SCTP_CID_I_DATA)
+       cid = SCTP_CID_DATA;
+
    if (cid <= SCTP_CID_BASE_MAX)
        return &chunk_event_table[cid][state];
......
@@ -201,6 +201,22 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
            cb(chunk);
}
static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
void (*cb)(struct sk_buff *, struct sock *))
{
struct sk_buff *skb, *tmp;
sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
cb(skb, sk);
sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
cb(skb, sk);
sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
cb(skb, sk);
}
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
                                   int len)
@@ -1554,6 +1570,7 @@ static void sctp_close(struct sock *sk, long timeout)
        if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
            !skb_queue_empty(&asoc->ulpq.reasm) ||
+           !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
            (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
            struct sctp_chunk *chunk;
@@ -2002,7 +2019,20 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
        if (err < 0)
            goto out_free;
/* If stream interleave is enabled, wait_connect has to be
* done earlier than data enqueue, as it needs to make data
* or idata according to asoc->intl_enable which is set
* after connection is done.
*/
if (sctp_sk(asoc->base.sk)->strm_interleave) {
timeo = sock_sndtimeo(sk, 0);
err = sctp_wait_for_connect(asoc, &timeo);
if (err)
goto out_unlock;
} else {
            wait_connect = true;
}
pr_debug("%s: we associated primitively\n", __func__); pr_debug("%s: we associated primitively\n", __func__);
} }
...@@ -2281,7 +2311,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval, ...@@ -2281,7 +2311,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
if (!event) if (!event)
return -ENOMEM; return -ENOMEM;
sctp_ulpq_tail_event(&asoc->ulpq, event); asoc->stream.si->enqueue_event(&asoc->ulpq, event);
} }
} }
...@@ -3180,7 +3210,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned ...@@ -3180,7 +3210,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
if (val == 0) { if (val == 0) {
val = asoc->pathmtu - sp->pf->af->net_header_len; val = asoc->pathmtu - sp->pf->af->net_header_len;
val -= sizeof(struct sctphdr) + val -= sizeof(struct sctphdr) +
sizeof(struct sctp_data_chunk); sctp_datachk_len(&asoc->stream);
} }
asoc->user_frag = val; asoc->user_frag = val;
asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
...@@ -3350,7 +3380,10 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk, ...@@ -3350,7 +3380,10 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
if (get_user(val, (int __user *)optval)) if (get_user(val, (int __user *)optval))
return -EFAULT; return -EFAULT;
sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; sctp_sk(sk)->frag_interleave = !!val;
if (!sctp_sk(sk)->frag_interleave)
sctp_sk(sk)->strm_interleave = 0;
return 0; return 0;
} }
...@@ -4019,6 +4052,40 @@ static int sctp_setsockopt_scheduler_value(struct sock *sk, ...@@ -4019,6 +4052,40 @@ static int sctp_setsockopt_scheduler_value(struct sock *sk,
return retval; return retval;
} }
static int sctp_setsockopt_interleaving_supported(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct net *net = sock_net(sk);
struct sctp_assoc_value params;
int retval = -EINVAL;
if (optlen < sizeof(params))
goto out;
optlen = sizeof(params);
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
if (params.assoc_id)
goto out;
if (!net->sctp.intl_enable || !sp->frag_interleave) {
retval = -EPERM;
goto out;
}
sp->strm_interleave = !!params.assoc_value;
retval = 0;
out:
return retval;
}
/* API 6.2 setsockopt(), getsockopt()
 *
 * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4206,6 +4273,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
    case SCTP_STREAM_SCHEDULER_VALUE:
        retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
        break;
+   case SCTP_INTERLEAVING_SUPPORTED:
+       retval = sctp_setsockopt_interleaving_supported(sk, optval,
+                                                       optlen);
+       break;
    default:
        retval = -ENOPROTOOPT;
        break;
@@ -6969,6 +7040,47 @@ static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
    return retval;
}
static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(params)) {
retval = -EINVAL;
goto out;
}
len = sizeof(params);
if (copy_from_user(&params, optval, len))
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
params.assoc_value = asoc->intl_enable;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
params.assoc_value = sp->strm_interleave;
} else {
retval = -EINVAL;
goto out;
}
if (put_user(len, optlen))
goto out;
if (copy_to_user(optval, &params, len))
goto out;
retval = 0;
out:
return retval;
}
static int sctp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
{
@@ -7159,6 +7271,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
        retval = sctp_getsockopt_scheduler_value(sk, len, optval,
                                                 optlen);
        break;
+   case SCTP_INTERLEAVING_SUPPORTED:
+       retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
+                                                       optlen);
+       break;
    default:
        retval = -ENOPROTOOPT;
        break;
@@ -8396,11 +8512,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
    }

-   sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
-       sctp_skb_set_owner_r_frag(skb, newsk);
-
-   sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
-       sctp_skb_set_owner_r_frag(skb, newsk);
+   sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);

    /* Set the type of socket to indicate that it is peeled off from the
     * original UDP-style socket or created with the accept() call on a
......
@@ -167,6 +167,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
    sched->init(stream);

in:
+   sctp_stream_interleave_init(stream);
    if (!incnt)
        goto out;
......
/* SCTP kernel implementation
* (C) Copyright Red Hat Inc. 2017
*
* This file is part of the SCTP kernel implementation
*
* These functions manipulate sctp stream queue/scheduling.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresched(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Xin Long <lucien.xin@gmail.com>
*/
#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>
static struct sctp_chunk *sctp_make_idatafrag_empty(
const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int len, __u8 flags, gfp_t gfp)
{
struct sctp_chunk *retval;
struct sctp_idatahdr dp;
memset(&dp, 0, sizeof(dp));
dp.stream = htons(sinfo->sinfo_stream);
if (sinfo->sinfo_flags & SCTP_UNORDERED)
flags |= SCTP_DATA_UNORDERED;
retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
if (!retval)
return NULL;
retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
return retval;
}
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
struct sctp_stream *stream;
struct sctp_chunk *lchunk;
__u32 cfsn = 0;
__u16 sid;
if (chunk->has_mid)
return;
sid = sctp_chunk_stream_no(chunk);
stream = &chunk->asoc->stream;
list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
struct sctp_idatahdr *hdr;
__u32 mid;
lchunk->has_mid = 1;
hdr = lchunk->subh.idata_hdr;
if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
hdr->ppid = lchunk->sinfo.sinfo_ppid;
else
hdr->fsn = htonl(cfsn++);
if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
sctp_mid_uo_next(stream, out, sid) :
sctp_mid_uo_peek(stream, out, sid);
} else {
mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
sctp_mid_next(stream, out, sid) :
sctp_mid_peek(stream, out, sid);
}
hdr->mid = htonl(mid);
}
}
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
const struct sctp_stream *stream;
__u16 sid, ssn;
if (chunk->chunk_hdr->type != SCTP_CID_DATA)
return false;
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
return true;
stream = &chunk->asoc->stream;
sid = sctp_chunk_stream_no(chunk);
ssn = ntohs(chunk->subh.data_hdr->ssn);
return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}
static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
struct sctp_stream *stream;
__u32 mid;
__u16 sid;
if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
return false;
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
return true;
stream = &chunk->asoc->stream;
sid = sctp_chunk_stream_no(chunk);
mid = ntohl(chunk->subh.idata_hdr->mid);
return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *cevent;
struct sk_buff *pos;
pos = skb_peek_tail(&ulpq->reasm);
if (!pos) {
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
return;
}
cevent = sctp_skb2event(pos);
if (event->stream == cevent->stream &&
event->mid == cevent->mid &&
(cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
(!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
event->fsn > cevent->fsn))) {
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
return;
}
if ((event->stream == cevent->stream &&
MID_lt(cevent->mid, event->mid)) ||
event->stream > cevent->stream) {
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
return;
}
skb_queue_walk(&ulpq->reasm, pos) {
cevent = sctp_skb2event(pos);
if (event->stream < cevent->stream ||
(event->stream == cevent->stream &&
MID_lt(event->mid, cevent->mid)))
break;
if (event->stream == cevent->stream &&
event->mid == cevent->mid &&
!(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
(event->msg_flags & SCTP_DATA_FIRST_FRAG ||
event->fsn < cevent->fsn))
break;
}
__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *first_frag = NULL;
struct sk_buff *last_frag = NULL;
struct sctp_ulpevent *retval;
struct sctp_stream_in *sin;
struct sk_buff *pos;
__u32 next_fsn = 0;
int is_last = 0;
sin = sctp_stream_in(ulpq->asoc, event->stream);
skb_queue_walk(&ulpq->reasm, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
if (cevent->stream < event->stream)
continue;
if (cevent->stream > event->stream ||
cevent->mid != sin->mid)
break;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
goto out;
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag) {
if (cevent->fsn == sin->fsn) {
first_frag = pos;
last_frag = pos;
next_fsn = cevent->fsn + 1;
}
} else if (cevent->fsn == next_fsn) {
last_frag = pos;
next_fsn++;
} else {
goto out;
}
break;
case SCTP_DATA_LAST_FRAG:
if (!first_frag) {
if (cevent->fsn == sin->fsn) {
first_frag = pos;
last_frag = pos;
next_fsn = 0;
is_last = 1;
}
} else if (cevent->fsn == next_fsn) {
last_frag = pos;
next_fsn = 0;
is_last = 1;
}
goto out;
default:
goto out;
}
}
out:
if (!first_frag)
return NULL;
retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
&ulpq->reasm, first_frag,
last_frag);
if (retval) {
sin->fsn = next_fsn;
if (is_last) {
retval->msg_flags |= MSG_EOR;
sin->pd_mode = 0;
}
}
return retval;
}
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_association *asoc = ulpq->asoc;
struct sk_buff *pos, *first_frag = NULL;
struct sctp_ulpevent *retval = NULL;
struct sk_buff *pd_first = NULL;
struct sk_buff *pd_last = NULL;
struct sctp_stream_in *sin;
__u32 next_fsn = 0;
__u32 pd_point = 0;
__u32 pd_len = 0;
__u32 mid = 0;
sin = sctp_stream_in(ulpq->asoc, event->stream);
skb_queue_walk(&ulpq->reasm, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
if (cevent->stream < event->stream)
continue;
if (cevent->stream > event->stream)
break;
if (MID_lt(cevent->mid, event->mid))
continue;
if (MID_lt(event->mid, cevent->mid))
break;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
if (cevent->mid == sin->mid) {
pd_first = pos;
pd_last = pos;
pd_len = pos->len;
}
first_frag = pos;
next_fsn = 0;
mid = cevent->mid;
break;
case SCTP_DATA_MIDDLE_FRAG:
if (first_frag && cevent->mid == mid &&
cevent->fsn == next_fsn) {
next_fsn++;
if (pd_first) {
pd_last = pos;
pd_len += pos->len;
}
} else {
first_frag = NULL;
}
break;
case SCTP_DATA_LAST_FRAG:
if (first_frag && cevent->mid == mid &&
cevent->fsn == next_fsn)
goto found;
else
first_frag = NULL;
break;
}
}
if (!pd_first)
goto out;
pd_point = sctp_sk(asoc->base.sk)->pd_point;
if (pd_point && pd_point <= pd_len) {
retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
&ulpq->reasm,
pd_first, pd_last);
if (retval) {
sin->fsn = next_fsn;
sin->pd_mode = 1;
}
}
goto out;
found:
retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
&ulpq->reasm,
first_frag, pos);
if (retval)
retval->msg_flags |= MSG_EOR;
out:
return retval;
}
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *retval = NULL;
struct sctp_stream_in *sin;
if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
event->msg_flags |= MSG_EOR;
return event;
}
sctp_intl_store_reasm(ulpq, event);
sin = sctp_stream_in(ulpq->asoc, event->stream);
if (sin->pd_mode && event->mid == sin->mid &&
event->fsn == sin->fsn)
retval = sctp_intl_retrieve_partial(ulpq, event);
if (!retval)
retval = sctp_intl_retrieve_reassembled(ulpq, event);
return retval;
}
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *cevent;
struct sk_buff *pos;
pos = skb_peek_tail(&ulpq->lobby);
if (!pos) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
return;
}
cevent = (struct sctp_ulpevent *)pos->cb;
if (event->stream == cevent->stream &&
MID_lt(cevent->mid, event->mid)) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
return;
}
if (event->stream > cevent->stream) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
return;
}
skb_queue_walk(&ulpq->lobby, pos) {
cevent = (struct sctp_ulpevent *)pos->cb;
if (cevent->stream > event->stream)
break;
if (cevent->stream == event->stream &&
MID_lt(event->mid, cevent->mid))
break;
}
__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff_head *event_list;
struct sctp_stream *stream;
struct sk_buff *pos, *tmp;
__u16 sid = event->stream;
stream = &ulpq->asoc->stream;
event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
if (cevent->stream > sid)
break;
if (cevent->stream < sid)
continue;
if (cevent->mid != sctp_mid_peek(stream, in, sid))
break;
sctp_mid_next(stream, in, sid);
__skb_unlink(pos, &ulpq->lobby);
__skb_queue_tail(event_list, pos);
}
}
static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_stream *stream;
__u16 sid;
stream = &ulpq->asoc->stream;
sid = event->stream;
if (event->mid != sctp_mid_peek(stream, in, sid)) {
sctp_intl_store_ordered(ulpq, event);
return NULL;
}
sctp_mid_next(stream, in, sid);
sctp_intl_retrieve_ordered(ulpq, event);
return event;
}
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *skb = sctp_event2skb(event);
struct sock *sk = ulpq->asoc->base.sk;
struct sctp_sock *sp = sctp_sk(sk);
struct sk_buff_head *skb_list;
skb_list = (struct sk_buff_head *)skb->prev;
if (sk->sk_shutdown & RCV_SHUTDOWN &&
(sk->sk_shutdown & SEND_SHUTDOWN ||
!sctp_ulpevent_is_notification(event)))
goto out_free;
if (!sctp_ulpevent_is_notification(event)) {
sk_mark_napi_id(sk, skb);
sk_incoming_cpu_update(sk);
}
if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
goto out_free;
if (skb_list)
skb_queue_splice_tail_init(skb_list,
&sk->sk_receive_queue);
else
__skb_queue_tail(&sk->sk_receive_queue, skb);
if (!sp->data_ready_signalled) {
sp->data_ready_signalled = 1;
sk->sk_data_ready(sk);
}
return 1;
out_free:
if (skb_list)
sctp_queue_purge_ulpevents(skb_list);
else
sctp_ulpevent_free(event);
return 0;
}
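
/* Queue an unordered fragment on reasm_uo, sorted by stream id, MID and
 * FSN, with a FIRST_FRAG placed ahead of the other fragments of its
 * message.
 */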
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *cevent;
struct sk_buff *pos;
pos = skb_peek_tail(&ulpq->reasm_uo);
if (!pos) {
__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
return;
}
cevent = sctp_skb2event(pos);
if (event->stream == cevent->stream &&
event->mid == cevent->mid &&
(cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
(!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
event->fsn > cevent->fsn))) {
__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
return;
}
if ((event->stream == cevent->stream &&
MID_lt(cevent->mid, event->mid)) ||
event->stream > cevent->stream) {
__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
return;
}
skb_queue_walk(&ulpq->reasm_uo, pos) {
cevent = sctp_skb2event(pos);
if (event->stream < cevent->stream ||
(event->stream == cevent->stream &&
MID_lt(event->mid, cevent->mid)))
break;
if (event->stream == cevent->stream &&
event->mid == cevent->mid &&
!(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
(event->msg_flags & SCTP_DATA_FIRST_FRAG ||
event->fsn < cevent->fsn))
break;
}
__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}
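
/* Continue partial delivery of the current unordered message: collect
 * the run of fragments following sin->fsn_uo for sin->mid_uo, and mark
 * MSG_EOR once the LAST_FRAG is included.
 */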
static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *first_frag = NULL;
struct sk_buff *last_frag = NULL;
struct sctp_ulpevent *retval;
struct sctp_stream_in *sin;
struct sk_buff *pos;
__u32 next_fsn = 0;
int is_last = 0;
sin = sctp_stream_in(ulpq->asoc, event->stream);
skb_queue_walk(&ulpq->reasm_uo, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
if (cevent->stream < event->stream)
continue;
if (cevent->stream > event->stream)
break;
if (MID_lt(cevent->mid, sin->mid_uo))
continue;
if (MID_lt(sin->mid_uo, cevent->mid))
break;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
goto out;
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag) {
if (cevent->fsn == sin->fsn_uo) {
first_frag = pos;
last_frag = pos;
next_fsn = cevent->fsn + 1;
}
} else if (cevent->fsn == next_fsn) {
last_frag = pos;
next_fsn++;
} else {
goto out;
}
break;
case SCTP_DATA_LAST_FRAG:
if (!first_frag) {
if (cevent->fsn == sin->fsn_uo) {
first_frag = pos;
last_frag = pos;
next_fsn = 0;
is_last = 1;
}
} else if (cevent->fsn == next_fsn) {
last_frag = pos;
next_fsn = 0;
is_last = 1;
}
goto out;
default:
goto out;
}
}
out:
if (!first_frag)
return NULL;
retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
&ulpq->reasm_uo, first_frag,
last_frag);
if (retval) {
sin->fsn_uo = next_fsn;
if (is_last) {
retval->msg_flags |= MSG_EOR;
sin->pd_mode_uo = 0;
}
}
return retval;
}
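
/* Try to reassemble a complete unordered message around this event's
 * MID; if it is still incomplete but the queued bytes reach the partial
 * delivery point, start partial delivery from its FIRST_FRAG instead.
 */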
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_association *asoc = ulpq->asoc;
struct sk_buff *pos, *first_frag = NULL;
struct sctp_ulpevent *retval = NULL;
struct sk_buff *pd_first = NULL;
struct sk_buff *pd_last = NULL;
struct sctp_stream_in *sin;
__u32 next_fsn = 0;
__u32 pd_point = 0;
__u32 pd_len = 0;
__u32 mid = 0;
sin = sctp_stream_in(ulpq->asoc, event->stream);
skb_queue_walk(&ulpq->reasm_uo, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
if (cevent->stream < event->stream)
continue;
if (cevent->stream > event->stream)
break;
if (MID_lt(cevent->mid, event->mid))
continue;
if (MID_lt(event->mid, cevent->mid))
break;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
if (!sin->pd_mode_uo) {
sin->mid_uo = cevent->mid;
pd_first = pos;
pd_last = pos;
pd_len = pos->len;
}
first_frag = pos;
next_fsn = 0;
mid = cevent->mid;
break;
case SCTP_DATA_MIDDLE_FRAG:
if (first_frag && cevent->mid == mid &&
cevent->fsn == next_fsn) {
next_fsn++;
if (pd_first) {
pd_last = pos;
pd_len += pos->len;
}
} else {
first_frag = NULL;
}
break;
case SCTP_DATA_LAST_FRAG:
if (first_frag && cevent->mid == mid &&
cevent->fsn == next_fsn)
goto found;
else
first_frag = NULL;
break;
}
}
if (!pd_first)
goto out;
pd_point = sctp_sk(asoc->base.sk)->pd_point;
if (pd_point && pd_point <= pd_len) {
retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
&ulpq->reasm_uo,
pd_first, pd_last);
if (retval) {
sin->fsn_uo = next_fsn;
sin->pd_mode_uo = 1;
}
}
goto out;
found:
retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
&ulpq->reasm_uo,
first_frag, pos);
if (retval)
retval->msg_flags |= MSG_EOR;
out:
return retval;
}
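
/* Reassembly entry point for unordered I-DATA, the counterpart of
 * sctp_intl_reasm() that works on reasm_uo and the *_uo stream state.
 */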
static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *retval = NULL;
struct sctp_stream_in *sin;
if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
event->msg_flags |= MSG_EOR;
return event;
}
sctp_intl_store_reasm_uo(ulpq, event);
sin = sctp_stream_in(ulpq->asoc, event->stream);
if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
event->fsn == sin->fsn_uo)
retval = sctp_intl_retrieve_partial_uo(ulpq, event);
if (!retval)
retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
return retval;
}
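
/* Pick the first stream that is not already in unordered partial
 * delivery but has a FIRST_FRAG and an in-order run of fragments queued,
 * and start partial delivery on it.
 */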
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
struct sctp_stream_in *csin, *sin = NULL;
struct sk_buff *first_frag = NULL;
struct sk_buff *last_frag = NULL;
struct sctp_ulpevent *retval;
struct sk_buff *pos;
__u32 next_fsn = 0;
__u16 sid = 0;
skb_queue_walk(&ulpq->reasm_uo, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
csin = sctp_stream_in(ulpq->asoc, cevent->stream);
if (csin->pd_mode_uo)
continue;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
if (first_frag)
goto out;
first_frag = pos;
last_frag = pos;
next_fsn = 0;
sin = csin;
sid = cevent->stream;
sin->mid_uo = cevent->mid;
break;
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag)
break;
if (cevent->stream == sid &&
cevent->mid == sin->mid_uo &&
cevent->fsn == next_fsn) {
next_fsn++;
last_frag = pos;
} else {
goto out;
}
break;
case SCTP_DATA_LAST_FRAG:
if (first_frag)
goto out;
break;
default:
break;
}
}
if (!first_frag)
return NULL;
out:
retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
&ulpq->reasm_uo, first_frag,
last_frag);
if (retval) {
sin->fsn_uo = next_fsn;
sin->pd_mode_uo = 1;
}
return retval;
}
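
/* Main receive path for an I-DATA chunk: build the ulpevent, fill in MID
 * plus PPID (first fragment) or FSN (later fragments), run ordered or
 * unordered reassembly and, for ordered data, MID-based ordering, then
 * enqueue whatever became deliverable.  Returns 1 if a complete message
 * (MSG_EOR) was produced, 0 otherwise, or -ENOMEM on allocation failure.
 */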
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sk_buff_head temp;
int event_eor = 0;
event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
if (!event)
return -ENOMEM;
event->mid = ntohl(chunk->subh.idata_hdr->mid);
if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
event->ppid = chunk->subh.idata_hdr->ppid;
else
event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
event = sctp_intl_reasm(ulpq, event);
if (event && event->msg_flags & MSG_EOR) {
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
event = sctp_intl_order(ulpq, event);
}
} else {
event = sctp_intl_reasm_uo(ulpq, event);
}
if (event) {
event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
sctp_enqueue_event(ulpq, event);
}
return event_eor;
}
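
/* Ordered counterpart of sctp_intl_retrieve_first_uo(): only a message
 * whose MID is the next expected one for its stream may enter partial
 * delivery.
 */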
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
struct sctp_stream_in *csin, *sin = NULL;
struct sk_buff *first_frag = NULL;
struct sk_buff *last_frag = NULL;
struct sctp_ulpevent *retval;
struct sk_buff *pos;
__u32 next_fsn = 0;
__u16 sid = 0;
skb_queue_walk(&ulpq->reasm, pos) {
struct sctp_ulpevent *cevent = sctp_skb2event(pos);
csin = sctp_stream_in(ulpq->asoc, cevent->stream);
if (csin->pd_mode)
continue;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
if (first_frag)
goto out;
if (cevent->mid == csin->mid) {
first_frag = pos;
last_frag = pos;
next_fsn = 0;
sin = csin;
sid = cevent->stream;
}
break;
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag)
break;
if (cevent->stream == sid &&
cevent->mid == sin->mid &&
cevent->fsn == next_fsn) {
next_fsn++;
last_frag = pos;
} else {
goto out;
}
break;
case SCTP_DATA_LAST_FRAG:
if (first_frag)
goto out;
break;
default:
break;
}
}
if (!first_frag)
return NULL;
out:
retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
&ulpq->reasm, first_frag,
last_frag);
if (retval) {
sin->fsn = next_fsn;
sin->pd_mode = 1;
}
return retval;
}
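
/* Force partial delivery: keep pushing the oldest deliverable fragment
 * runs from both the ordered and unordered reassembly queues to the
 * socket until nothing more can be extracted.
 */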
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
struct sctp_ulpevent *event;
if (!skb_queue_empty(&ulpq->reasm)) {
do {
event = sctp_intl_retrieve_first(ulpq);
if (event)
sctp_enqueue_event(ulpq, event);
} while (event);
}
if (!skb_queue_empty(&ulpq->reasm_uo)) {
do {
event = sctp_intl_retrieve_first_uo(ulpq);
if (event)
sctp_enqueue_event(ulpq, event);
} while (event);
}
}
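
/* Make room for an incoming I-DATA chunk by reneging events from the
 * lobby and both reassembly queues, then retry delivery of the chunk
 * and, failing that, fall back to partial delivery.
 */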
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
gfp_t gfp)
{
struct sctp_association *asoc = ulpq->asoc;
__u32 freed = 0;
__u16 needed;
if (chunk) {
needed = ntohs(chunk->chunk_hdr->length);
needed -= sizeof(struct sctp_idata_chunk);
} else {
needed = SCTP_DEFAULT_MAXWINDOW;
}
if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
if (freed < needed)
freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
needed);
if (freed < needed)
freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
needed);
}
if (chunk && freed >= needed)
if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
sctp_intl_start_pd(ulpq, gfp);
sk_mem_reclaim(asoc->base.sk);
}
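
/* Notify the application that partial delivery on (sid, mid) was aborted
 * by queueing an SCTP_PARTIAL_DELIVERY_ABORTED event, if subscribed.
 */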
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
__u32 mid, __u16 flags, gfp_t gfp)
{
struct sock *sk = ulpq->asoc->base.sk;
struct sctp_ulpevent *ev = NULL;
if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
&sctp_sk(sk)->subscribe))
return;
ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
sid, mid, flags, gfp);
if (ev) {
__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
if (!sctp_sk(sk)->data_ready_signalled) {
sctp_sk(sk)->data_ready_signalled = 1;
sk->sk_data_ready(sk);
}
}
}
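
/* After the expected MID has been skipped past an aborted message, flush
 * lobby events for this stream that are now at or below the expected MID
 * and deliver them together with anything they unblock.
 */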
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
struct sctp_stream *stream = &ulpq->asoc->stream;
struct sctp_ulpevent *cevent, *event = NULL;
struct sk_buff_head *lobby = &ulpq->lobby;
struct sk_buff *pos, *tmp;
struct sk_buff_head temp;
__u16 csid;
__u32 cmid;
skb_queue_head_init(&temp);
sctp_skb_for_each(pos, lobby, tmp) {
cevent = (struct sctp_ulpevent *)pos->cb;
csid = cevent->stream;
cmid = cevent->mid;
if (csid > sid)
break;
if (csid < sid)
continue;
if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
break;
__skb_unlink(pos, lobby);
if (!event)
event = sctp_skb2event(pos);
__skb_queue_tail(&temp, pos);
}
if (!event && pos != (struct sk_buff *)lobby) {
cevent = (struct sctp_ulpevent *)pos->cb;
csid = cevent->stream;
cmid = cevent->mid;
if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
sctp_mid_next(stream, in, csid);
__skb_unlink(pos, lobby);
__skb_queue_tail(&temp, pos);
event = sctp_skb2event(pos);
}
}
if (event) {
sctp_intl_retrieve_ordered(ulpq, event);
sctp_enqueue_event(ulpq, event);
}
}
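
/* Abort partial delivery on every incoming stream, for both the
 * unordered and the ordered side, notify the user, skip the affected
 * ordered MIDs and flush whatever is left queued.
 */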
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
struct sctp_stream *stream = &ulpq->asoc->stream;
__u16 sid;
for (sid = 0; sid < stream->incnt; sid++) {
struct sctp_stream_in *sin = &stream->in[sid];
__u32 mid;
if (sin->pd_mode_uo) {
sin->pd_mode_uo = 0;
mid = sin->mid_uo;
sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
}
if (sin->pd_mode) {
sin->pd_mode = 0;
mid = sin->mid;
sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
sctp_mid_skip(stream, in, sid, mid);
sctp_intl_reap_ordered(ulpq, sid);
}
}
/* Interleaved abort_pd happens only when all queued data needs to be cleaned up. */
sctp_ulpq_flush(ulpq);
}
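
/* Operation tables: sctp_stream_interleave_0 keeps the existing DATA/SSN
 * code paths, while sctp_stream_interleave_1 routes everything through
 * the I-DATA/MID handlers above, so callers need no per-chunk-type
 * if/else checks.
 */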
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
.data_chunk_len = sizeof(struct sctp_data_chunk),
/* DATA process functions */
.make_datafrag = sctp_make_datafrag_empty,
.assign_number = sctp_chunk_assign_ssn,
.validate_data = sctp_validate_data,
.ulpevent_data = sctp_ulpq_tail_data,
.enqueue_event = sctp_ulpq_tail_event,
.renege_events = sctp_ulpq_renege,
.start_pd = sctp_ulpq_partial_delivery,
.abort_pd = sctp_ulpq_abort_pd,
};
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
.data_chunk_len = sizeof(struct sctp_idata_chunk),
/* I-DATA process functions */
.make_datafrag = sctp_make_idatafrag_empty,
.assign_number = sctp_chunk_assign_mid,
.validate_data = sctp_validate_idata,
.ulpevent_data = sctp_ulpevent_idata,
.enqueue_event = sctp_enqueue_event,
.renege_events = sctp_renege_events,
.start_pd = sctp_intl_start_pd,
.abort_pd = sctp_intl_abort_pd,
};
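
/* Pick the ops table when the stream is set up, based on asoc->intl_enable.
 * Callers then dispatch through stream->si; an illustrative sketch only
 * (the real call sites are outside this hunk):
 *
 *	struct sctp_stream_interleave *si = asoc->stream.si;
 *
 *	si->ulpevent_data(&asoc->ulpq, chunk, gfp);
 *	si->abort_pd(&asoc->ulpq, gfp);
 */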
void sctp_stream_interleave_init(struct sctp_stream *stream)
{
struct sctp_association *asoc;
asoc = container_of(stream, struct sctp_association, stream);
stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
: &sctp_stream_interleave_0;
}
...@@ -443,8 +443,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
goto fail;
/* Pull off the common chunk header and DATA header. */
-skb_pull(skb, sizeof(struct sctp_data_chunk));
-len -= sizeof(struct sctp_data_chunk);
+skb_pull(skb, sctp_datachk_len(&asoc->stream));
+len -= sctp_datachk_len(&asoc->stream);
/* Embed the event fields inside the cloned skb. */
event = sctp_skb2event(skb);
...@@ -705,8 +705,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
sctp_ulpevent_receive_data(event, asoc);
event->stream = ntohs(chunk->subh.data_hdr->stream);
-event->ssn = ntohs(chunk->subh.data_hdr->ssn);
-event->ppid = chunk->subh.data_hdr->ppid;
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
event->flags |= SCTP_UNORDERED;
event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
...@@ -732,8 +730,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
* various events.
*/
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
-const struct sctp_association *asoc, __u32 indication,
-gfp_t gfp)
+const struct sctp_association *asoc,
+__u32 indication, __u32 sid, __u32 seq,
+__u32 flags, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_pdapi_event *pd;
...@@ -754,7 +753,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
* Currently unused.
*/
pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
-pd->pdapi_flags = 0;
+pd->pdapi_flags = flags;
+pd->pdapi_stream = sid;
+pd->pdapi_seq = seq;
/* pdapi_length: 32 bits (unsigned integer)
*
...
...@@ -60,6 +60,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
ulpq->asoc = asoc;
skb_queue_head_init(&ulpq->reasm);
+skb_queue_head_init(&ulpq->reasm_uo);
skb_queue_head_init(&ulpq->lobby);
ulpq->pd_mode = 0;
...@@ -83,6 +84,10 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
sctp_ulpevent_free(event);
}
+while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
+event = sctp_skb2event(skb);
+sctp_ulpevent_free(event);
+}
}
/* Dispose of a ulpqueue. */
...@@ -104,6 +109,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
if (!event)
return -ENOMEM;
+event->ssn = ntohs(chunk->subh.data_hdr->ssn);
+event->ppid = chunk->subh.data_hdr->ppid;
/* Do reassembly if needed. */
event = sctp_ulpq_reasm(ulpq, event);
...@@ -328,8 +336,9 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
* payload was fragmented on the way and ip had to reassemble them.
* We add the rest of skb's to the first skb's fraglist.
*/
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
-struct sk_buff_head *queue, struct sk_buff *f_frag,
+struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+struct sk_buff_head *queue,
+struct sk_buff *f_frag,
struct sk_buff *l_frag)
{
struct sk_buff *pos;
...@@ -853,7 +862,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
struct sctp_stream *stream;
/* Check if this message needs ordering. */
-if (SCTP_DATA_UNORDERED & event->msg_flags)
+if (event->msg_flags & SCTP_DATA_UNORDERED)
return event;
/* Note: The stream ID must be verified before this routine. */
...@@ -974,8 +983,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
sctp_ulpq_reap_ordered(ulpq, sid);
}
-static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
-struct sk_buff_head *list, __u16 needed)
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
+__u16 needed)
{
__u16 freed = 0;
__u32 tsn, last_tsn;
...@@ -1140,7 +1149,7 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
&sctp_sk(sk)->subscribe))
ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
SCTP_PARTIAL_DELIVERY_ABORTED,
-gfp);
+0, 0, 0, gfp);
if (ev)
__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
...