Commit b9622ed4 authored by David S. Miller

Merge branch 'sctp-stream-interleave-part-1'

Xin Long says:

====================
sctp: Implement Stream Interleave: The I-DATA Chunk Supporting User Message Interleaving

Stream Interleave will be implemented in two parts:

   1. The I-DATA Chunk Supporting User Message Interleaving
   2. Interaction with Other SCTP Extensions

Overview of Part 1, from Section 1.1 of RFC 8260:

   This document describes a new chunk carrying payload data called
   I-DATA.  This chunk incorporates the properties of the current SCTP
   DATA chunk, all the flags and fields except the Stream Sequence
   Number (SSN), and also adds two new fields in its chunk header -- the
   Fragment Sequence Number (FSN) and the Message Identifier (MID).  The
   FSN is only used for reassembling all fragments that have the same
   MID and the same ordering property.  The TSN is only used for the
   reliable transfer in combination with Selective Acknowledgment (SACK)
   chunks.

   In addition, the MID is also used for ensuring ordered delivery
   instead of using the stream sequence number (the I-DATA chunk omits
   an SSN).
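
To make the field usage concrete, here is an illustrative trace (not
taken from the RFC) of two ordered fragmented messages, one on stream 1
and one on stream 2, interleaved on the wire -- something plain DATA
chunks cannot express:

   TSN 100  SID 1  MID 0  B        (first fragment: B flag set, the
   TSN 101  SID 2  MID 0  B         PPID occupies the FSN field and
   TSN 102  SID 1  MID 0  FSN 1     the FSN is implicitly 0)
   TSN 103  SID 2  MID 0  FSN 1  E
   TSN 104  SID 1  MID 0  FSN 2  E

The receiver reassembles by grouping fragments with the same stream,
MID and ordering property using the FSN, and delivers ordered messages
within each stream by MID.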

As the first part of the stream interleave implementation, this patchset
adds an ops framework named sctp_stream_interleave, which groups the
operations whose behaviour differs between DATA and I-DATA chunks (chunk
creation, sequence number assignment, validation, ULP event delivery,
partial delivery and event reneging).

Then it defines sctp_stream_interleave_0 to work for normal DATA chunks
and sctp_stream_interleave_1 for I-DATA chunks.
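
For illustration, here is a minimal sketch of how the two ops sets could
be wired up. Apart from sctp_make_datafrag_empty and
sctp_stream_interleave_init, which appear in the headers below, the
handler names and initializer details are assumptions, not the final code:

static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len	= sizeof(struct sctp_data_chunk),
	/* DATA process */
	.make_datafrag	= sctp_make_datafrag_empty,
	/* ... remaining DATA handlers ... */
};

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len	= sizeof(struct sctp_idata_chunk),
	/* I-DATA process (handler name below is a placeholder) */
	.make_datafrag	= sctp_make_idatafrag_empty,
	/* ... remaining I-DATA handlers ... */
};

void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	/* struct sctp_stream is embedded in the association, so the
	 * owning asoc and its intl_enable flag are reachable here.
	 */
	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
				       : &sctp_stream_interleave_0;
}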

With these ops, hundreds of if-else checks for the different handling of
I-DATA chunks are avoided. Besides, very little code could have been
shared between the two function sets anyway.
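
Concretely, the call site in sctp_datamsg_from_user() goes from what
would otherwise be a branch at every DATA-path call site to a single
indirect call (the if/else form and the I-DATA helper name below are
hypothetical):

	/* without the ops framework: a branch at every call site */
	if (asoc->intl_enable)
		chunk = sctp_make_idatafrag_empty(asoc, sinfo, len, frag,
						  GFP_KERNEL);
	else
		chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
						 GFP_KERNEL);

	/* with the ops framework: one indirect call, no branch */
	chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
					       GFP_KERNEL);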

This patchset first adds some basic variables, structures and socket
options, then implements the ops one by one to gradually add the
procedures for ordered I-DATA, and finally adjusts some code to make it
work for unordered I-DATA as well.

To keep the implementation safe and avoid breaking the normal DATA chunk
processing, this feature cannot be enabled for use until all of the
stream interleave code is in place.
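
Once the whole series is in and the feature may be switched on, the
intended userspace usage is roughly the following sketch. It assumes the
net.sctp.intl_enable sysctl has been set to 1 by the administrator, and
defines SCTP_INTERLEAVING_SUPPORTED locally in case the installed uapi
headers predate this series:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

#ifndef SCTP_INTERLEAVING_SUPPORTED
#define SCTP_INTERLEAVING_SUPPORTED 125	/* value added by this series */
#endif

static int enable_interleaving(int fd)
{
	struct sctp_assoc_value av;
	int frag_ilv = 2;	/* fragment interleave must be enabled
				 * first (any non-zero level) */

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
		       &frag_ilv, sizeof(frag_ilv)) < 0)
		return -1;

	memset(&av, 0, sizeof(av));
	av.assoc_id = 0;	/* only the socket-wide setting is accepted */
	av.assoc_value = 1;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
			  &av, sizeof(av));
}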

v1 -> v2:
  - fixed a checkpatch warning about a missing blank line.
  - avoided a kbuild warning reported by gcc-4.9.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 92ff4264 13228238
......@@ -102,6 +102,9 @@ enum sctp_cid {
/* AUTH Extension Section 4.1 */
SCTP_CID_AUTH = 0x0F,
/* sctp ndata 5.1. I-DATA */
SCTP_CID_I_DATA = 0x40,
/* PR-SCTP Sec 3.2 */
SCTP_CID_FWD_TSN = 0xC0,
......@@ -240,6 +243,23 @@ struct sctp_data_chunk {
struct sctp_datahdr data_hdr;
};
struct sctp_idatahdr {
__be32 tsn;
__be16 stream;
__be16 reserved;
__be32 mid;
union {
__u32 ppid;
__be32 fsn;
};
__u8 payload[0];
};
struct sctp_idata_chunk {
struct sctp_chunkhdr chunk_hdr;
struct sctp_idatahdr data_hdr;
};
/* DATA Chuck Specific Flags */
enum {
SCTP_DATA_MIDDLE_FRAG = 0x00,
......
......@@ -122,9 +122,12 @@ struct netns_sctp {
/* Flag to indicate if PR-CONFIG is enabled. */
int reconf_enable;
/* Flag to idicate if SCTP-AUTH is enabled */
/* Flag to indicate if SCTP-AUTH is enabled */
int auth_enable;
/* Flag to indicate if stream interleave is enabled */
int intl_enable;
/*
* Policy to control SCTP IPv4 address scoping
* 0 - Disable IPv4 address scoping
......
......@@ -145,12 +145,13 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other)
SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
a->chunk_hdr->type == SCTP_CID_I_DATA)
/* Calculate the actual data size in a data chunk */
#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end)\
- (unsigned long)(c->chunk_hdr)\
- sizeof(struct sctp_data_chunk)))
#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
(unsigned long)(c->chunk_hdr) - \
sctp_datachk_len(&c->asoc->stream)))
/* Internal error codes */
enum sctp_ierror {
......
......@@ -444,13 +444,13 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
int frag = pmtu;
frag -= sp->pf->af->net_header_len;
frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
sizeof(struct sctp_data_chunk)));
sctp_datachk_len(&asoc->stream)));
return frag;
}
......
......@@ -197,10 +197,11 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
const __u32 lowest_tsn,
const struct sctp_chunk *chunk);
struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
__u8 flags, int paylen, gfp_t gfp);
struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int len, const __u8 flags,
__u16 ssn, gfp_t gfp);
int len, __u8 flags, gfp_t gfp);
struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
const __u32 lowest_tsn);
struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
......@@ -342,7 +343,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
__u16 size;
size = ntohs(chunk->chunk_hdr->length);
size -= sizeof(struct sctp_data_chunk);
size -= sctp_datahdr_len(&chunk->asoc->stream);
return size;
}
......@@ -358,6 +359,12 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
typecheck(__u32, b) && \
((__s32)((a) - (b)) <= 0))
/* Compare two MIDs */
#define MID_lt(a, b) \
(typecheck(__u32, a) && \
typecheck(__u32, b) && \
((__s32)((a) - (b)) < 0))
/* Compare two SSNs */
#define SSN_lt(a,b) \
(typecheck(__u16, a) && \
......
/* SCTP kernel implementation
* (C) Copyright Red Hat Inc. 2017
*
* These are definitions used by the stream schedulers, defined in RFC
* draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email addresses:
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Xin Long <lucien.xin@gmail.com>
*/
#ifndef __sctp_stream_interleave_h__
#define __sctp_stream_interleave_h__
struct sctp_stream_interleave {
__u16 data_chunk_len;
/* (I-)DATA process */
struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int len, __u8 flags, gfp_t gfp);
void (*assign_number)(struct sctp_chunk *chunk);
bool (*validate_data)(struct sctp_chunk *chunk);
int (*ulpevent_data)(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk, gfp_t gfp);
int (*enqueue_event)(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event);
void (*renege_events)(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk, gfp_t gfp);
void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
};
void sctp_stream_interleave_init(struct sctp_stream *stream);
#endif /* __sctp_stream_interleave_h__ */
......@@ -89,6 +89,7 @@ struct sctp_stream;
#include <net/sctp/tsnmap.h>
#include <net/sctp/ulpevent.h>
#include <net/sctp/ulpqueue.h>
#include <net/sctp/stream_interleave.h>
/* Structures useful for managing bind/connect. */
......@@ -217,6 +218,7 @@ struct sctp_sock {
disable_fragments:1,
v4mapped:1,
frag_interleave:1,
strm_interleave:1,
recvrcvinfo:1,
recvnxtinfo:1,
data_ready_signalled:1;
......@@ -397,6 +399,28 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
#define sctp_ssn_skip(stream, type, sid, ssn) \
((stream)->type[sid].ssn = ssn + 1)
/* What is the current MID number for this stream? */
#define sctp_mid_peek(stream, type, sid) \
((stream)->type[sid].mid)
/* Return the next MID number for this stream. */
#define sctp_mid_next(stream, type, sid) \
((stream)->type[sid].mid++)
/* Skip over this mid and all below. */
#define sctp_mid_skip(stream, type, sid, mid) \
((stream)->type[sid].mid = mid + 1)
#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
/* What is the current MID_uo number for this stream? */
#define sctp_mid_uo_peek(stream, type, sid) \
((stream)->type[sid].mid_uo)
/* Return the next MID_uo number for this stream. */
#define sctp_mid_uo_next(stream, type, sid) \
((stream)->type[sid].mid_uo++)
/*
* Pointers to address related SCTP functions.
* (i.e. things that depend on the address family.)
......@@ -574,6 +598,7 @@ struct sctp_chunk {
struct sctp_addiphdr *addip_hdr;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_authhdr *auth_hdr;
struct sctp_idatahdr *idata_hdr;
} subh;
__u8 *chunk_end;
......@@ -620,6 +645,7 @@ struct sctp_chunk {
__u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
has_tsn:1, /* Does this chunk have a TSN yet? */
has_ssn:1, /* Does this chunk have a SSN yet? */
#define has_mid has_ssn
singleton:1, /* Only chunk in the packet? */
end_of_packet:1, /* Last chunk in the packet? */
ecn_ce_done:1, /* Have we processed the ECN CE bit? */
......@@ -1357,13 +1383,25 @@ struct sctp_stream_out_ext {
};
struct sctp_stream_out {
__u16 ssn;
__u8 state;
union {
__u32 mid;
__u16 ssn;
};
__u32 mid_uo;
struct sctp_stream_out_ext *ext;
__u8 state;
};
struct sctp_stream_in {
__u16 ssn;
union {
__u32 mid;
__u16 ssn;
};
__u32 mid_uo;
__u32 fsn;
__u32 fsn_uo;
char pd_mode;
char pd_mode_uo;
};
struct sctp_stream {
......@@ -1387,11 +1425,22 @@ struct sctp_stream {
struct sctp_stream_out_ext *rr_next;
};
};
struct sctp_stream_interleave *si;
};
#define SCTP_STREAM_CLOSED 0x00
#define SCTP_STREAM_OPEN 0x01
static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
{
return stream->si->data_chunk_len;
}
static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
{
return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
}
/* SCTP_GET_ASSOC_STATS counters */
struct sctp_priv_assoc_stats {
/* Maximum observed rto in the association during subsequent
......@@ -1940,6 +1989,7 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
force_delay:1,
intl_enable:1,
prsctp_enable:1,
reconf_enable:1;
......
......@@ -45,19 +45,29 @@
/* A structure to carry information to the ULP (e.g. Sockets API) */
/* Warning: This sits inside an skb.cb[] area. Be very careful of
* growing this structure as it is at the maximum limit now.
*
* sctp_ulpevent is saved in sk->cb(48 bytes), whose last 4 bytes
* have been taken by sock_skb_cb, So here it has to use 'packed'
* to make sctp_ulpevent fit into the rest 44 bytes.
*/
struct sctp_ulpevent {
struct sctp_association *asoc;
struct sctp_chunk *chunk;
unsigned int rmem_len;
__u32 ppid;
union {
__u32 mid;
__u16 ssn;
};
union {
__u32 ppid;
__u32 fsn;
};
__u32 tsn;
__u32 cumtsn;
__u16 stream;
__u16 ssn;
__u16 flags;
__u16 msg_flags;
};
} __packed;
/* Retrieve the skb this event sits inside of. */
static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
......@@ -112,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
const struct sctp_association *asoc,
__u32 indication, gfp_t gfp);
__u32 indication, __u32 sid, __u32 seq,
__u32 flags, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
const struct sctp_association *asoc, gfp_t gfp);
......@@ -140,6 +151,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
const struct sctp_association *asoc, __u16 flags,
__u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
struct sctp_ulpevent *sctp_make_reassembled_event(
struct net *net, struct sk_buff_head *queue,
struct sk_buff *f_frag, struct sk_buff *l_frag);
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
......
......@@ -45,6 +45,7 @@ struct sctp_ulpq {
char pd_mode;
struct sctp_association *asoc;
struct sk_buff_head reasm;
struct sk_buff_head reasm_uo;
struct sk_buff_head lobby;
};
......@@ -76,11 +77,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
#endif /* __sctp_ulpqueue_h__ */
__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
struct sk_buff_head *list, __u16 needed);
#endif /* __sctp_ulpqueue_h__ */
......@@ -125,6 +125,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_SOCKOPT_PEELOFF_FLAGS 122
#define SCTP_STREAM_SCHEDULER 123
#define SCTP_STREAM_SCHEDULER_VALUE 124
#define SCTP_INTERLEAVING_SUPPORTED 125
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
......@@ -459,6 +460,8 @@ struct sctp_pdapi_event {
__u32 pdapi_length;
__u32 pdapi_indication;
sctp_assoc_t pdapi_assoc_id;
__u32 pdapi_stream;
__u32 pdapi_seq;
};
enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
......
......@@ -14,7 +14,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
tsnmap.o bind_addr.o socket.o primitive.o \
output.o input.o debug.o stream.o auth.o \
offload.o stream_sched.o stream_sched_prio.o \
stream_sched_rr.o
stream_sched_rr.o stream_interleave.o
sctp_probe-y := probe.o
......
......@@ -861,7 +861,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
0, spc_state, error, GFP_ATOMIC);
if (event)
sctp_ulpq_tail_event(&asoc->ulpq, event);
asoc->stream.si->enqueue_event(&asoc->ulpq, event);
}
/* Select new active and retran paths. */
......
......@@ -124,7 +124,7 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
error, GFP_ATOMIC);
if (ev)
sctp_ulpq_tail_event(&asoc->ulpq, ev);
asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
sctp_chunk_put(chunk);
......@@ -191,7 +191,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
*/
max_data = asoc->pathmtu -
sctp_sk(asoc->base.sk)->pf->af->net_header_len -
sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
sizeof(struct sctphdr) - sctp_datachk_len(&asoc->stream);
max_data = SCTP_TRUNC4(max_data);
/* If the the peer requested that we authenticate DATA chunks
......@@ -264,8 +264,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
frag |= SCTP_DATA_SACK_IMM;
}
chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
0, GFP_KERNEL);
chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
GFP_KERNEL);
if (!chunk) {
err = -ENOMEM;
goto errout;
......
......@@ -313,6 +313,7 @@ static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
/* We believe that this chunk is OK to add to the packet */
switch (chunk->chunk_hdr->type) {
case SCTP_CID_DATA:
case SCTP_CID_I_DATA:
/* Account for the data being in the packet */
sctp_packet_append_data(packet, chunk);
/* Disallow SACK bundling after DATA. */
......@@ -724,7 +725,7 @@ static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
* or delay in hopes of bundling a full sized packet.
*/
if (chunk->skb->len + q->out_qlen > transport->pathmtu -
packet->overhead - sizeof(struct sctp_data_chunk) - 4)
packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
/* Enough data queued to fill a packet */
return SCTP_XMIT_OK;
......@@ -759,7 +760,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
asoc->peer.rwnd = rwnd;
sctp_chunk_assign_tsn(chunk);
sctp_chunk_assign_ssn(chunk);
asoc->stream.si->assign_number(chunk);
}
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
......
......@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
struct sctp_inithdr init;
union sctp_params addrs;
struct sctp_sock *sp;
__u8 extensions[4];
__u8 extensions[5];
size_t chunksize;
__be16 types[2];
int num_ext = 0;
......@@ -278,6 +278,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
if (sp->adaptation_ind)
chunksize += sizeof(aiparam);
if (sp->strm_interleave) {
extensions[num_ext] = SCTP_CID_I_DATA;
num_ext += 1;
}
chunksize += vparam_len;
/* Account for AUTH related parameters */
......@@ -392,7 +397,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
struct sctp_inithdr initack;
union sctp_params addrs;
struct sctp_sock *sp;
__u8 extensions[4];
__u8 extensions[5];
size_t chunksize;
int num_ext = 0;
int cookie_len;
......@@ -442,6 +447,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
if (sp->adaptation_ind)
chunksize += sizeof(aiparam);
if (asoc->intl_enable) {
extensions[num_ext] = SCTP_CID_I_DATA;
num_ext += 1;
}
if (asoc->peer.auth_capable) {
auth_random = (struct sctp_paramhdr *)asoc->c.auth_random;
chunksize += ntohs(auth_random->length);
......@@ -711,38 +721,31 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
/* Make a DATA chunk for the given association from the provided
* parameters. However, do not populate the data payload.
*/
struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
const struct sctp_sndrcvinfo *sinfo,
int data_len, __u8 flags, __u16 ssn,
gfp_t gfp)
int len, __u8 flags, gfp_t gfp)
{
struct sctp_chunk *retval;
struct sctp_datahdr dp;
int chunk_len;
/* We assign the TSN as LATE as possible, not here when
* creating the chunk.
*/
dp.tsn = 0;
memset(&dp, 0, sizeof(dp));
dp.ppid = sinfo->sinfo_ppid;
dp.stream = htons(sinfo->sinfo_stream);
dp.ppid = sinfo->sinfo_ppid;
/* Set the flags for an unordered send. */
if (sinfo->sinfo_flags & SCTP_UNORDERED) {
if (sinfo->sinfo_flags & SCTP_UNORDERED)
flags |= SCTP_DATA_UNORDERED;
dp.ssn = 0;
} else
dp.ssn = htons(ssn);
chunk_len = sizeof(dp) + data_len;
retval = sctp_make_data(asoc, flags, chunk_len, gfp);
retval = sctp_make_data(asoc, flags, sizeof(dp) + len, gfp);
if (!retval)
goto nodata;
return NULL;
retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
nodata:
return retval;
}
......@@ -1415,6 +1418,12 @@ static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen, gfp);
}
struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
__u8 flags, int paylen, gfp_t gfp)
{
return _sctp_make_chunk(asoc, SCTP_CID_I_DATA, flags, paylen, gfp);
}
static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
__u8 type, __u8 flags, int paylen,
gfp_t gfp)
......@@ -2032,6 +2041,10 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
if (net->sctp.addip_enable)
asoc->peer.asconf_capable = 1;
break;
case SCTP_CID_I_DATA:
if (sctp_sk(asoc->base.sk)->strm_interleave)
asoc->intl_enable = 1;
break;
default:
break;
}
......
......@@ -632,7 +632,7 @@ static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
struct sctp_chunk *abort;
/* Cancel any partial delivery in progress. */
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
......@@ -972,7 +972,7 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
if (!ev)
return;
sctp_ulpq_tail_event(&asoc->ulpq, ev);
asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
switch (err_hdr->cause) {
case SCTP_ERROR_UNKNOWN_CHUNK:
......@@ -1058,7 +1058,7 @@ static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (ev)
sctp_ulpq_tail_event(&asoc->ulpq, ev);
asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
/* Helper function to generate an adaptation indication event */
......@@ -1070,7 +1070,7 @@ static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
if (ev)
sctp_ulpq_tail_event(&asoc->ulpq, ev);
asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
......@@ -1483,8 +1483,9 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
__func__, cmd->obj.chunk, &asoc->ulpq);
sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
GFP_ATOMIC);
asoc->stream.si->ulpevent_data(&asoc->ulpq,
cmd->obj.chunk,
GFP_ATOMIC);
break;
case SCTP_CMD_EVENT_ULP:
......@@ -1492,7 +1493,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
__func__, cmd->obj.ulpevent, &asoc->ulpq);
sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
asoc->stream.si->enqueue_event(&asoc->ulpq,
cmd->obj.ulpevent);
break;
case SCTP_CMD_REPLY:
......@@ -1729,12 +1731,13 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_PART_DELIVER:
sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
break;
case SCTP_CMD_RENEGE:
sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
GFP_ATOMIC);
asoc->stream.si->renege_events(&asoc->ulpq,
cmd->obj.chunk,
GFP_ATOMIC);
break;
case SCTP_CMD_SETUP_T4:
......
......@@ -3013,7 +3013,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
......@@ -3034,7 +3034,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr,
sizeof(struct sctp_datahdr));
sctp_datahdr_len(&asoc->stream));
default:
BUG();
}
......@@ -3133,7 +3133,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
......@@ -3150,7 +3150,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr,
sizeof(struct sctp_datahdr));
sctp_datahdr_len(&asoc->stream));
default:
BUG();
}
......@@ -6244,14 +6244,12 @@ static int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *err;
enum sctp_verb deliver;
size_t datalen;
u8 ordered = 0;
u16 ssn, sid;
__u32 tsn;
int tmp;
data_hdr = (struct sctp_datahdr *)chunk->skb->data;
chunk->subh.data_hdr = data_hdr;
skb_pull(chunk->skb, sizeof(*data_hdr));
skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));
tsn = ntohl(data_hdr->tsn);
pr_debug("%s: TSN 0x%x\n", __func__, tsn);
......@@ -6299,7 +6297,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* Actually, allow a little bit of overflow (up to a MTU).
*/
datalen = ntohs(chunk->chunk_hdr->length);
datalen -= sizeof(struct sctp_data_chunk);
datalen -= sctp_datachk_len(&asoc->stream);
deliver = SCTP_CMD_CHUNK_ULP;
......@@ -6394,7 +6392,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
if (chunk->asoc)
chunk->asoc->stats.iodchunks++;
ordered = 1;
}
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
......@@ -6405,8 +6402,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
* and discard the DATA chunk.
*/
sid = ntohs(data_hdr->stream);
if (sid >= asoc->stream.incnt) {
if (ntohs(data_hdr->stream) >= asoc->stream.incnt) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
......@@ -6427,8 +6423,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* SSN is smaller then the next expected one. If it is, it wrapped
* and is invalid.
*/
ssn = ntohs(data_hdr->ssn);
if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->stream, in, sid)))
if (!asoc->stream.si->validate_data(chunk))
return SCTP_IERROR_PROTO_VIOLATION;
/* Send the data up to the user. Note: Schedule the
......
......@@ -985,6 +985,9 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
if (state > SCTP_STATE_MAX)
return &bug;
if (net->sctp.intl_enable && cid == SCTP_CID_I_DATA)
cid = SCTP_CID_DATA;
if (cid <= SCTP_CID_BASE_MAX)
return &chunk_event_table[cid][state];
......
......@@ -201,6 +201,22 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
cb(chunk);
}
static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
void (*cb)(struct sk_buff *, struct sock *))
{
struct sk_buff *skb, *tmp;
sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
cb(skb, sk);
sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
cb(skb, sk);
sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
cb(skb, sk);
}
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
......@@ -1554,6 +1570,7 @@ static void sctp_close(struct sock *sk, long timeout)
if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
!skb_queue_empty(&asoc->ulpq.reasm) ||
!skb_queue_empty(&asoc->ulpq.reasm_uo) ||
(sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
struct sctp_chunk *chunk;
......@@ -2002,7 +2019,20 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
if (err < 0)
goto out_free;
wait_connect = true;
/* If stream interleave is enabled, wait_connect has to be
* done earlier than data enqueue, as it needs to make data
* or idata according to asoc->intl_enable which is set
* after connection is done.
*/
if (sctp_sk(asoc->base.sk)->strm_interleave) {
timeo = sock_sndtimeo(sk, 0);
err = sctp_wait_for_connect(asoc, &timeo);
if (err)
goto out_unlock;
} else {
wait_connect = true;
}
pr_debug("%s: we associated primitively\n", __func__);
}
......@@ -2281,7 +2311,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
if (!event)
return -ENOMEM;
sctp_ulpq_tail_event(&asoc->ulpq, event);
asoc->stream.si->enqueue_event(&asoc->ulpq, event);
}
}
......@@ -3180,7 +3210,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
if (val == 0) {
val = asoc->pathmtu - sp->pf->af->net_header_len;
val -= sizeof(struct sctphdr) +
sizeof(struct sctp_data_chunk);
sctp_datachk_len(&asoc->stream);
}
asoc->user_frag = val;
asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
......@@ -3350,7 +3380,10 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
if (get_user(val, (int __user *)optval))
return -EFAULT;
sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
sctp_sk(sk)->frag_interleave = !!val;
if (!sctp_sk(sk)->frag_interleave)
sctp_sk(sk)->strm_interleave = 0;
return 0;
}
......@@ -4019,6 +4052,40 @@ static int sctp_setsockopt_scheduler_value(struct sock *sk,
return retval;
}
static int sctp_setsockopt_interleaving_supported(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
struct net *net = sock_net(sk);
struct sctp_assoc_value params;
int retval = -EINVAL;
if (optlen < sizeof(params))
goto out;
optlen = sizeof(params);
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
if (params.assoc_id)
goto out;
if (!net->sctp.intl_enable || !sp->frag_interleave) {
retval = -EPERM;
goto out;
}
sp->strm_interleave = !!params.assoc_value;
retval = 0;
out:
return retval;
}
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
......@@ -4206,6 +4273,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_STREAM_SCHEDULER_VALUE:
retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
break;
case SCTP_INTERLEAVING_SUPPORTED:
retval = sctp_setsockopt_interleaving_supported(sk, optval,
optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
......@@ -6969,6 +7040,47 @@ static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
return retval;
}
static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EFAULT;
if (len < sizeof(params)) {
retval = -EINVAL;
goto out;
}
len = sizeof(params);
if (copy_from_user(&params, optval, len))
goto out;
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
params.assoc_value = asoc->intl_enable;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
params.assoc_value = sp->strm_interleave;
} else {
retval = -EINVAL;
goto out;
}
if (put_user(len, optlen))
goto out;
if (copy_to_user(optval, &params, len))
goto out;
retval = 0;
out:
return retval;
}
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
......@@ -7159,6 +7271,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_scheduler_value(sk, len, optval,
optlen);
break;
case SCTP_INTERLEAVING_SUPPORTED:
retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
optlen);
break;
default:
retval = -ENOPROTOOPT;
break;
......@@ -8396,11 +8512,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
}
sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
sctp_skb_set_owner_r_frag(skb, newsk);
sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
sctp_skb_set_owner_r_frag(skb, newsk);
sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
/* Set the type of socket to indicate that it is peeled off from the
* original UDP-style socket or created with the accept() call on a
......
......@@ -167,6 +167,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
sched->init(stream);
in:
sctp_stream_interleave_init(stream);
if (!incnt)
goto out;
......
......@@ -443,8 +443,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
goto fail;
/* Pull off the common chunk header and DATA header. */
skb_pull(skb, sizeof(struct sctp_data_chunk));
len -= sizeof(struct sctp_data_chunk);
skb_pull(skb, sctp_datachk_len(&asoc->stream));
len -= sctp_datachk_len(&asoc->stream);
/* Embed the event fields inside the cloned skb. */
event = sctp_skb2event(skb);
......@@ -705,8 +705,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
sctp_ulpevent_receive_data(event, asoc);
event->stream = ntohs(chunk->subh.data_hdr->stream);
event->ssn = ntohs(chunk->subh.data_hdr->ssn);
event->ppid = chunk->subh.data_hdr->ppid;
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
event->flags |= SCTP_UNORDERED;
event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
......@@ -732,8 +730,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
* various events.
*/
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
const struct sctp_association *asoc, __u32 indication,
gfp_t gfp)
const struct sctp_association *asoc,
__u32 indication, __u32 sid, __u32 seq,
__u32 flags, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_pdapi_event *pd;
......@@ -754,7 +753,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
* Currently unused.
*/
pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
pd->pdapi_flags = 0;
pd->pdapi_flags = flags;
pd->pdapi_stream = sid;
pd->pdapi_seq = seq;
/* pdapi_length: 32 bits (unsigned integer)
*
......
......@@ -60,6 +60,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
ulpq->asoc = asoc;
skb_queue_head_init(&ulpq->reasm);
skb_queue_head_init(&ulpq->reasm_uo);
skb_queue_head_init(&ulpq->lobby);
ulpq->pd_mode = 0;
......@@ -83,6 +84,10 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
sctp_ulpevent_free(event);
}
while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
}
/* Dispose of a ulpqueue. */
......@@ -104,6 +109,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
if (!event)
return -ENOMEM;
event->ssn = ntohs(chunk->subh.data_hdr->ssn);
event->ppid = chunk->subh.data_hdr->ppid;
/* Do reassembly if needed. */
event = sctp_ulpq_reasm(ulpq, event);
......@@ -328,9 +336,10 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
* payload was fragmented on the way and ip had to reassemble them.
* We add the rest of skb's to the first skb's fraglist.
*/
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
struct sk_buff_head *queue, struct sk_buff *f_frag,
struct sk_buff *l_frag)
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
struct sk_buff_head *queue,
struct sk_buff *f_frag,
struct sk_buff *l_frag)
{
struct sk_buff *pos;
struct sk_buff *new = NULL;
......@@ -853,7 +862,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
struct sctp_stream *stream;
/* Check if this message needs ordering. */
if (SCTP_DATA_UNORDERED & event->msg_flags)
if (event->msg_flags & SCTP_DATA_UNORDERED)
return event;
/* Note: The stream ID must be verified before this routine. */
......@@ -974,8 +983,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
sctp_ulpq_reap_ordered(ulpq, sid);
}
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
struct sk_buff_head *list, __u16 needed)
__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
__u16 needed)
{
__u16 freed = 0;
__u32 tsn, last_tsn;
......@@ -1140,7 +1149,7 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
&sctp_sk(sk)->subscribe))
ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
SCTP_PARTIAL_DELIVERY_ABORTED,
gfp);
0, 0, 0, gfp);
if (ev)
__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
......