Commit 033f9f79, authored Jan 31, 2003 by Jon Grimm

Merge touki.austin.ibm.com:/home/jgrimm/bk/lksctp-2.5
into touki.austin.ibm.com:/home/jgrimm/bk/lksctp-2.5.work

Parents: 06fbb458, 6258f7e8
Showing 7 changed files with 188 additions and 133 deletions (+188, -133).
include/net/sctp/constants.h   +23  -27
include/net/sctp/sm.h           +4   -0
include/net/sctp/structs.h      +3  -11
net/sctp/associola.c            +0   -5
net/sctp/outqueue.c            +15   -2
net/sctp/sm_make_chunk.c      +128  -51
net/sctp/socket.c              +15  -37
include/net/sctp/constants.h
@@ -61,15 +61,6 @@ enum { SCTP_MAX_STREAM = 0xffff };
 enum { SCTP_DEFAULT_OUTSTREAMS = 10 };
 enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
 
-/* Define the amount of space to reserve for SCTP, IP, LL.
- * There is a little bit of waste that we are always allocating
- * for ipv6 headers, but this seems worth the simplicity.
- */
-#define SCTP_IP_OVERHEAD ((sizeof(struct sctphdr)\
-                          + sizeof(struct ipv6hdr)\
-                          + MAX_HEADER))
-
 /* Define the amount of space to reserve for SCTP, IP, LL.
  * There is a little bit of waste that we are always allocating
  * for ipv6 headers, but this seems worth the simplicity.
@@ -250,8 +241,13 @@ extern const char *sctp_state_tbl[], *sctp_evttype_tbl[], *sctp_status_tbl[];
 #define SCTP_ADDR_REACHABLE      2
 #define SCTP_ADDR_NOT_REACHABLE  1
 
 /* Maximum chunk length considering padding requirements. */
 enum { SCTP_MAX_CHUNK_LEN = ((1 << 16) - sizeof(__u32)) };
 
+/* Encourage Cookie-Echo bundling by pre-fragmenting chunks a little
+ * harder (until reaching ESTABLISHED state).
+ */
+enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
+
 /* Guess at how big to make the TSN mapping array.
  * We guarantee that we can handle at least this big a gap between the
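These constants are what the new sendmsg-side fragmentation (sctp_datachunks_from_user() in net/sctp/sm_make_chunk.c below) sizes chunks against: the fragmentation point comes from the association PMTU minus IP/SCTP overhead, is clamped to SCTP_MAX_CHUNK_LEN because the chunk length field is 16 bits, and the first chunk is shrunk by SCTP_ARBITRARY_COOKIE_ECHO_LEN until the association reaches ESTABLISHED. A minimal user-space sketch of that arithmetic, with made-up FAKE_* header sizes standing in for the kernel's sizeof() values:

#include <stdio.h>

/* Named values come from the diff above; the FAKE_* numbers are illustrative
 * stand-ins for the kernel's SCTP_IP_OVERHEAD and DATA chunk header sizes.
 */
#define SCTP_MIN_PMTU                  576
#define SCTP_MAX_CHUNK_LEN             ((1 << 16) - sizeof(unsigned int))
#define SCTP_ARBITRARY_COOKIE_ECHO_LEN 200
#define FAKE_IP_OVERHEAD               92
#define FAKE_DATA_CHUNK_HDR            16

/* How many payload bytes fit in one DATA chunk for a given path MTU? */
static size_t max_data_per_chunk(size_t pmtu)
{
        size_t max = pmtu;

        if (max < SCTP_MIN_PMTU)
                max = SCTP_MIN_PMTU;      /* never size below the minimum PMTU */
        max -= FAKE_IP_OVERHEAD;          /* room for IP + SCTP headers */
        if (max > SCTP_MAX_CHUNK_LEN)
                max = SCTP_MAX_CHUNK_LEN; /* chunk length field is 16 bits */
        max -= FAKE_DATA_CHUNK_HDR;       /* room for the DATA chunk header */
        return max;
}

int main(void)
{
        size_t max = max_data_per_chunk(1500);

        printf("full-size fragment: %zu bytes\n", max);
        /* Before ESTABLISHED, leave room to bundle the COOKIE-ECHO with the
         * first DATA chunk.
         */
        printf("first fragment while handshaking: %zu bytes\n",
               max - SCTP_ARBITRARY_COOKIE_ECHO_LEN);
        return 0;
}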
@@ -271,7 +267,8 @@ extern const char *sctp_state_tbl[], *sctp_evttype_tbl[], *sctp_status_tbl[];
  * is enough room for 131 duplicate reports.  Round down to the
  * nearest power of 2.
  */
-#define SCTP_MAX_DUP_TSNS 128
+enum { SCTP_MIN_PMTU = 576 };
+enum { SCTP_MAX_DUP_TSNS = 128 };
 
 typedef enum {
         SCTP_COUNTER_INIT_ERROR,
@@ -280,7 +277,6 @@ typedef enum {
 /* How many counters does an association need? */
 #define SCTP_NUMBER_COUNTERS 5
 
 /* Here we define the default timers. */
 
 /* cookie timer def = ? seconds */
include/net/sctp/sm.h
@@ -270,6 +270,10 @@ sctp_chunk_t *sctp_make_op_error(const sctp_association_t *,
                                  size_t paylen);
 
 void sctp_chunk_assign_tsn(sctp_chunk_t *);
 void sctp_chunk_assign_ssn(sctp_chunk_t *);
+int sctp_datachunks_from_user(sctp_association_t *,
+                              const struct sctp_sndrcvinfo *,
+                              struct msghdr *, int len,
+                              struct sk_buff_head *);
 
 /* Prototypes for statetable processing. */
include/net/sctp/structs.h
@@ -567,9 +567,7 @@ struct SCTP_chunk {
 sctp_chunk_t *sctp_make_chunk(const sctp_association_t *, __u8 type,
                               __u8 flags, int size);
 void sctp_free_chunk(sctp_chunk_t *);
-sctp_chunk_t *sctp_copy_chunk(sctp_chunk_t *, int flags);
 void *sctp_addto_chunk(sctp_chunk_t *chunk, int len, const void *data);
-int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data);
 sctp_chunk_t *sctp_chunkify(struct sk_buff *, const sctp_association_t *,
                             struct sock *);
 void sctp_init_addrs(sctp_chunk_t *, union sctp_addr *, union sctp_addr *);
@@ -1115,10 +1113,7 @@ static inline sctp_endpoint_t *sctp_ep(sctp_endpoint_common_t *base)
 {
         sctp_endpoint_t *ep;
 
-        /* We are not really a list, but the list_entry() macro is
-         * really quite generic to find the address of an outter struct.
-         */
-        ep = list_entry(base, sctp_endpoint_t, base);
+        ep = container_of(base, sctp_endpoint_t, base);
         return ep;
 }
@@ -1592,10 +1587,7 @@ static inline sctp_association_t *sctp_assoc(sctp_endpoint_common_t *base)
 {
         sctp_association_t *asoc;
 
-        /* We are not really a list, but the list_entry() macro is
-         * really quite generic find the address of an outter struct.
-         */
-        asoc = list_entry(base, sctp_association_t, base);
+        asoc = container_of(base, sctp_association_t, base);
         return asoc;
 }
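Both accessors above switch from list_entry() to container_of(), which names what the cast actually does: recover the address of the enclosing structure from a pointer to an embedded member (list_entry() is defined in terms of the same macro). A stand-alone sketch of the pattern, using hypothetical endpoint/base_common types rather than the SCTP ones:

#include <stddef.h>
#include <stdio.h>

/* Same shape as the kernel macro; list_entry() is an alias for it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-ins for sctp_endpoint_common_t embedded in an endpoint. */
struct base_common {
        int refcnt;
};

struct endpoint {
        long id;
        struct base_common base;        /* embedded member, like ep->base */
};

static struct endpoint *to_endpoint(struct base_common *b)
{
        /* Walk back from the embedded member to the enclosing struct. */
        return container_of(b, struct endpoint, base);
}

int main(void)
{
        struct endpoint ep = { .id = 42 };
        struct base_common *b = &ep.base;

        printf("recovered id: %ld\n", to_endpoint(b)->id);     /* prints 42 */
        return 0;
}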
net/sctp/associola.c
@@ -629,11 +629,6 @@ __u32 __sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
         return retval;
 }
 
-/* Fetch the next Stream Sequence Number for stream number 'sid'. */
-__u16 __sctp_association_get_next_ssn(sctp_association_t *asoc, __u16 sid)
-{
-        return sctp_ssn_next(&asoc->ssnmap->out, sid);
-}
-
 /* Compare two addresses to see if they match.  Wildcard addresses
  * only match themselves.
net/sctp/outqueue.c
@@ -571,6 +571,7 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
         sctp_chunk_t *first_frag, *frag;
         struct list_head *frag_list;
         int nfrags;
+        __u8 old_flags, flags;
 
         /* nfrags = no. of max size fragments + any smaller last fragment. */
         nfrags = ((chunk_data_len / max_frag_data_len) +
@@ -579,9 +580,16 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
         /* Start of the data in the chunk. */
         data_ptr += sizeof(sctp_datahdr_t);
 
+        /* Are we fragmenting an already fragmented large message? */
+        old_flags = chunk->chunk_hdr->flags;
+        if (old_flags & SCTP_DATA_FIRST_FRAG)
+                flags = SCTP_DATA_FIRST_FRAG;
+        else
+                flags = SCTP_DATA_MIDDLE_FRAG;
+
         /* Make the first fragment. */
         first_frag = sctp_make_datafrag(asoc, sinfo, max_frag_data_len,
-                                        data_ptr, SCTP_DATA_FIRST_FRAG, ssn);
+                                        data_ptr, flags, ssn);
         if (!first_frag)
                 goto err;
@@ -609,9 +617,14 @@ sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
                 data_ptr += max_frag_data_len;
         }
 
+        if (old_flags & SCTP_DATA_LAST_FRAG)
+                flags = SCTP_DATA_LAST_FRAG;
+        else
+                flags = SCTP_DATA_MIDDLE_FRAG;
+
         /* Make the last fragment. */
         frag = sctp_make_datafrag(asoc, sinfo, chunk_data_len, data_ptr,
-                                  SCTP_DATA_LAST_FRAG, ssn);
+                                  flags, ssn);
         if (!frag)
                 goto err;
         frag->has_ssn = 1;
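The old_flags handling added above keeps the B (first) and E (last) bits honest when the chunk being split is itself already a fragment of a larger message: the first piece may only claim FIRST_FRAG if the original chunk carried it, and likewise for LAST_FRAG on the final piece, so middle fragments of a big message never sprout message boundaries. A small sketch of that flag selection, with flag values defined locally for illustration:

#include <stdio.h>

/* Defined locally for the sketch; the kernel gets these from its headers. */
#define SCTP_DATA_MIDDLE_FRAG 0x00
#define SCTP_DATA_LAST_FRAG   0x01      /* E bit: ends a user message */
#define SCTP_DATA_FIRST_FRAG  0x02      /* B bit: begins a user message */

/* Flags for the first piece produced when re-splitting a chunk. */
static unsigned int first_piece_flags(unsigned int old_flags)
{
        return (old_flags & SCTP_DATA_FIRST_FRAG) ? SCTP_DATA_FIRST_FRAG
                                                  : SCTP_DATA_MIDDLE_FRAG;
}

/* Flags for the last piece produced when re-splitting a chunk. */
static unsigned int last_piece_flags(unsigned int old_flags)
{
        return (old_flags & SCTP_DATA_LAST_FRAG) ? SCTP_DATA_LAST_FRAG
                                                 : SCTP_DATA_MIDDLE_FRAG;
}

int main(void)
{
        /* Splitting a chunk that was itself a MIDDLE fragment: none of the
         * pieces may carry the B or E bit.
         */
        unsigned int old_flags = SCTP_DATA_MIDDLE_FRAG;

        printf("first piece flags: %u, last piece flags: %u\n",
               first_piece_flags(old_flags), last_piece_flags(old_flags));
        return 0;
}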
net/sctp/sm_make_chunk.c
@@ -1078,53 +1078,6 @@ void sctp_free_chunk(sctp_chunk_t *chunk)
         SCTP_DBG_OBJCNT_DEC(chunk);
 }
 
-/* Do a deep copy of a chunk. */
-sctp_chunk_t *sctp_copy_chunk(sctp_chunk_t *chunk, const int priority)
-{
-        sctp_chunk_t *retval;
-        long offset;
-
-        retval = t_new(sctp_chunk_t, priority);
-        if (!retval)
-                goto nodata;
-
-        /* Do the shallow copy. */
-        *retval = *chunk;
-
-        /* Make sure that the copy does NOT think it is on any lists. */
-        retval->next = NULL;
-        retval->prev = NULL;
-        retval->list = NULL;
-        INIT_LIST_HEAD(&retval->transmitted_list);
-        INIT_LIST_HEAD(&retval->frag_list);
-
-        /* Now we copy the deep structure. */
-        retval->skb = skb_copy(chunk->skb, priority);
-        if (!retval->skb) {
-                kfree(retval);
-                goto nodata;
-        }
-
-        /* Move the copy headers to point into the new skb. */
-        offset = ((__u8 *)retval->skb->head) - ((__u8 *)chunk->skb->head);
-
-        if (retval->param_hdr.v)
-                retval->param_hdr.v += offset;
-        if (retval->subh.v)
-                retval->subh.v += offset;
-        if (retval->chunk_end)
-                ((__u8 *)retval->chunk_end) += offset;
-        if (retval->chunk_hdr)
-                ((__u8 *)retval->chunk_hdr) += offset;
-        if (retval->sctp_hdr)
-                ((__u8 *)retval->sctp_hdr) += offset;
-
-        SCTP_DBG_OBJCNT_INC(chunk);
-        return retval;
-
-nodata:
-        return NULL;
-}
-
 /* Append bytes to the end of a chunk.  Will panic if chunk is not big
  * enough.
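The removed sctp_copy_chunk() is still a useful illustration of deep-copying a parsed buffer: after skb_copy() duplicates the data, every cached header pointer still refers to the old buffer and has to be re-based onto the new one. A minimal user-space sketch of the same fix-up on a plain malloc'd buffer (hypothetical parsed_buf type, not the skb API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical parsed view of a buffer: raw storage plus a cached pointer
 * into it, loosely mirroring chunk->skb plus chunk->chunk_hdr.
 */
struct parsed_buf {
        unsigned char *head;    /* start of the raw buffer */
        size_t size;
        unsigned char *hdr;     /* cached pointer somewhere inside head[] */
};

static int deep_copy(struct parsed_buf *dst, const struct parsed_buf *src)
{
        dst->head = malloc(src->size);
        if (!dst->head)
                return -1;
        memcpy(dst->head, src->head, src->size);
        dst->size = src->size;

        /* Re-base the cached pointer: keep it at the same offset into the
         * new buffer that it had in the old one.
         */
        dst->hdr = dst->head + (src->hdr - src->head);
        return 0;
}

int main(void)
{
        unsigned char storage[32] = "....DATA";
        struct parsed_buf src = { storage, sizeof(storage), storage + 4 };
        struct parsed_buf dst;

        if (deep_copy(&dst, &src) == 0) {
                printf("copied header bytes: %.4s\n", (char *)dst.hdr);
                free(dst.head);
        }
        return 0;
}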
@@ -1153,7 +1106,8 @@ void *sctp_addto_chunk(sctp_chunk_t *chunk, int len, const void *data)
  * chunk is not big enough.
  * Returns a kernel err value.
  */
-int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data)
+static int sctp_user_addto_chunk(sctp_chunk_t *chunk, int off, int len,
+                                 struct iovec *data)
 {
         __u8 *target;
         int err = 0;
@@ -1162,7 +1116,7 @@ int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data)
         target = skb_put(chunk->skb, len);
 
         /* Copy data (whole iovec) into chunk */
-        if ((err = memcpy_fromiovec(target, data, len)))
+        if ((err = memcpy_fromiovecend(target, data, off, len)))
                 goto out;
 
         /* Adjust the chunk length field. */
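The extra off parameter lets each fragment copy its own slice of the caller's iovec; memcpy_fromiovecend() copies len bytes starting off bytes into the iovec without consuming it. A rough user-space analogue of that offset walk (copy_from_iovec_at() is a hypothetical helper written for this sketch):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy 'len' bytes that start 'off' bytes into the data described by the
 * iovec array, without modifying the iovec itself.  Returns 0, or -1 if the
 * iovec does not contain off + len bytes.
 */
static int copy_from_iovec_at(void *dst, const struct iovec *iov, int iovcnt,
                              size_t off, size_t len)
{
        unsigned char *out = dst;
        int i;

        for (i = 0; i < iovcnt && len > 0; i++) {
                size_t avail = iov[i].iov_len;
                size_t take;

                if (off >= avail) {     /* segment lies entirely before 'off' */
                        off -= avail;
                        continue;
                }
                take = avail - off;
                if (take > len)
                        take = len;
                memcpy(out, (unsigned char *)iov[i].iov_base + off, take);
                out += take;
                len -= take;
                off = 0;                /* later segments start at offset 0 */
        }
        return len ? -1 : 0;
}

int main(void)
{
        char a[] = "hello ", b[] = "world";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = 6 },
                { .iov_base = b, .iov_len = 5 },
        };
        char out[6] = { 0 };

        /* Grab 5 bytes starting at offset 4: "o wor". */
        if (copy_from_iovec_at(out, iov, 2, 4, 5) == 0)
                printf("%s\n", out);
        return 0;
}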
@@ -1174,6 +1128,125 @@ int sctp_user_addto_chunk(sctp_chunk_t *chunk, int len, struct iovec *data)
         return err;
 }
 
+/* A data chunk can have a maximum payload of (2^16 - 20).  Break
+ * down any such message into smaller chunks.  Opportunistically, fragment
+ * the chunks down to the current MTU constraints.  We may get refragmented
+ * later if the PMTU changes, but it is _much better_ to fragment immediately
+ * with a reasonable guess than always doing our fragmentation on the
+ * soft-interrupt.
+ */
+int sctp_datachunks_from_user(sctp_association_t *asoc,
+                              const struct sctp_sndrcvinfo *sinfo,
+                              struct msghdr *msg, int msg_len,
+                              struct sk_buff_head *chunks)
+{
+        int max, whole, i, offset, over, err;
+        int len, first_len;
+        sctp_chunk_t *chunk;
+        __u8 frag;
+
+        /* What is a reasonable fragmentation point right now? */
+        max = asoc->pmtu;
+        if (max < SCTP_MIN_PMTU)
+                max = SCTP_MIN_PMTU;
+        max -= SCTP_IP_OVERHEAD;
+
+        /* Make sure not beyond maximum chunk size. */
+        if (max > SCTP_MAX_CHUNK_LEN)
+                max = SCTP_MAX_CHUNK_LEN;
+
+        /* Subtract out the overhead of a data chunk header. */
+        max -= sizeof(struct sctp_data_chunk);
+
+        whole = 0;
+        first_len = max;
+
+        /* Encourage Cookie-ECHO bundling. */
+        if (asoc->state < SCTP_STATE_ESTABLISHED) {
+                whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN);
+
+                /* Account for the DATA to be bundled with the COOKIE-ECHO. */
+                if (whole) {
+                        first_len = max - SCTP_ARBITRARY_COOKIE_ECHO_LEN;
+                        msg_len -= first_len;
+                        whole = 1;
+                }
+        }
+
+        /* How many full sized?  How many bytes leftover? */
+        whole += msg_len / max;
+        over = msg_len % max;
+        offset = 0;
+
+        /* Create chunks for all the full sized DATA chunks. */
+        for (i = 0, len = first_len; i < whole; i++) {
+                frag = SCTP_DATA_MIDDLE_FRAG;
+
+                if (0 == i)
+                        frag |= SCTP_DATA_FIRST_FRAG;
+
+                if ((i == (whole - 1)) && !over)
+                        frag |= SCTP_DATA_LAST_FRAG;
+
+                chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
+                if (!chunk)
+                        goto nomem;
+                err = sctp_user_addto_chunk(chunk, offset, len, msg->msg_iov);
+                if (err < 0)
+                        goto errout;
+
+                offset += len;
+
+                /* Put the chunk->skb back into the form expected by send. */
+                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
+                           - (__u8 *)chunk->skb->data);
+
+                __skb_queue_tail(chunks, (struct sk_buff *)chunk);
+
+                /* The first chunk, the first chunk was likely short
+                 * to allow bundling, so reset to full size.
+                 */
+                if (0 == i)
+                        len = max;
+        }
+
+        /* .. now the leftover bytes. */
+        if (over) {
+                if (!whole)
+                        frag = SCTP_DATA_NOT_FRAG;
+                else
+                        frag = SCTP_DATA_LAST_FRAG;
+
+                chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
+                if (!chunk)
+                        goto nomem;
+
+                err = sctp_user_addto_chunk(chunk, offset, over, msg->msg_iov);
+
+                /* Put the chunk->skb back into the form expected by send. */
+                __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
+                           - (__u8 *)chunk->skb->data);
+                if (err < 0)
+                        goto errout;
+
+                __skb_queue_tail(chunks, (struct sk_buff *)chunk);
+        }
+        err = 0;
+        goto out;
+
+nomem:
+        err = -ENOMEM;
+errout:
+        while ((chunk = (sctp_chunk_t *)__skb_dequeue(chunks)))
+                sctp_free_chunk(chunk);
+out:
+        return err;
+}
+
 /* Helper function to assign a TSN if needed.  This assumes that both
  * the data_hdr and association have already been assigned.
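The sizing logic in sctp_datachunks_from_user() is easiest to follow with concrete numbers: given a per-chunk payload limit already derived from the PMTU, it computes how many full-size chunks are needed, shrinks the first one while COOKIE-ECHO bundling is still wanted, and leaves the remainder for a short tail chunk. A user-space sketch of just that arithmetic (illustrative values, not the kernel constants):

#include <stdio.h>

#define COOKIE_ECHO_RESERVE 200         /* SCTP_ARBITRARY_COOKIE_ECHO_LEN */

/* Mirror of the chunk-count arithmetic above: 'max' is the per-chunk payload
 * limit already derived from the PMTU.
 */
static void plan_fragments(int msg_len, int max, int established)
{
        int whole = 0, first_len = max, over;

        if (!established) {
                /* Shrink the first chunk so a COOKIE-ECHO can ride along. */
                whole = msg_len / (max - COOKIE_ECHO_RESERVE);
                if (whole) {
                        first_len = max - COOKIE_ECHO_RESERVE;
                        msg_len -= first_len;
                        whole = 1;
                }
        }
        whole += msg_len / max;         /* remaining full-size chunks */
        over = msg_len % max;           /* short tail chunk, if any */

        printf("chunks: %d (first %d bytes, rest %d), leftover %d bytes\n",
               whole, first_len, max, over);
}

int main(void)
{
        /* A 5000-byte message against a 1392-byte payload limit. */
        plan_fragments(5000, 1392, 1);  /* established: 3 full chunks, 824-byte tail */
        plan_fragments(5000, 1392, 0);  /* handshaking: first chunk shrinks to 1192 */
        return 0;
}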
@@ -1190,7 +1263,11 @@ void sctp_chunk_assign_ssn(sctp_chunk_t *chunk)
                 ssn = 0;
         } else {
                 sid = htons(chunk->subh.data_hdr->stream);
-                ssn = htons(__sctp_association_get_next_ssn(chunk->asoc, sid));
+                if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+                        ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
+                else
+                        ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
+
+                ssn = htons(ssn);
         }
 
         chunk->subh.data_hdr->ssn = ssn;
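The rewritten SSN assignment only advances the stream counter on the fragment carrying SCTP_DATA_LAST_FRAG; earlier fragments merely peek at the current value, so every fragment of one user message leaves with the same SSN and the next message gets the following one. A toy model of that peek/next pairing (hypothetical ssn_stream type, not the kernel's ssnmap):

#include <stdio.h>

/* Toy per-stream SSN counter standing in for the per-stream output state. */
struct ssn_stream {
        unsigned short ssn;
};

/* Return the current SSN without consuming it (fragments before the last). */
static unsigned short ssn_peek(struct ssn_stream *s)
{
        return s->ssn;
}

/* Return the current SSN and advance to the next one (last fragment only). */
static unsigned short ssn_next(struct ssn_stream *s)
{
        return s->ssn++;
}

int main(void)
{
        struct ssn_stream stream = { 0 };

        /* A message split into three fragments: FIRST, MIDDLE, LAST. */
        printf("first  fragment ssn: %u\n", ssn_peek(&stream)); /* 0 */
        printf("middle fragment ssn: %u\n", ssn_peek(&stream)); /* 0 */
        printf("last   fragment ssn: %u\n", ssn_next(&stream)); /* 0, counter now 1 */

        /* The next message starts on the following SSN. */
        printf("next message ssn:    %u\n", ssn_peek(&stream)); /* 1 */
        return 0;
}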
net/sctp/socket.c
@@ -730,6 +730,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
         sctp_scope_t scope;
         long timeo;
         __u16 sinfo_flags = 0;
+        struct sk_buff_head chunks;
 
         SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %d)\n",
                           sk, msg, msg_len);
@@ -946,19 +947,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                 goto out_free;
         }
 
-        /* FIXME: In the current implementation, a single chunk is created
-         * for the entire message initially, even if it has to be fragmented
-         * later.  As the length field in the chunkhdr is used to set
-         * the chunk length, the maximum size of the chunk and hence the
-         * message is limited by its type(__u16).
-         * The real fix is to fragment the message before creating the chunks.
-         */
-        if (msg_len > ((__u16)(~(__u16)0) -
-                       WORD_ROUND(sizeof(sctp_data_chunk_t) + 1))) {
-                err = -EMSGSIZE;
-                goto out_free;
-        }
-
         /* If fragmentation is disabled and the message length exceeds the
          * association fragmentation point, return EMSGSIZE.  The I-D
          * does not specify what this error is, but this looks like
@@ -991,13 +979,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                 goto out_free;
         }
 
-        /* Get enough memory for the whole message. */
-        chunk = sctp_make_data_empty(asoc, sinfo, msg_len);
-        if (!chunk) {
-                err = -ENOMEM;
-                goto out_free;
-        }
-
 #if 0
         /* FIXME: This looks wrong so I'll comment out.
          * We should be able to use this same technique for
@@ -1013,20 +994,13 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
         }
 #endif /* 0 */
 
-        /* Copy the message from the user. */
-        err = sctp_user_addto_chunk(chunk, msg_len, msg->msg_iov);
-        if (err < 0)
+        /* Break the message into multiple chunks of maximum size. */
+        skb_queue_head_init(&chunks);
+        err = sctp_datachunks_from_user(asoc, sinfo, msg, msg_len, &chunks);
+        if (err)
                 goto out_free;
 
-        SCTP_DEBUG_PRINTK("Copied message to chunk: %p.\n", chunk);
-
-        /* Put the chunk->skb back into the form expected by send. */
-        __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
-                   - (__u8 *)chunk->skb->data);
-
-        /* Do accounting for the write space. */
-        sctp_set_owner_w(chunk);
-
         /* Auto-connect, if we aren't connected already. */
         if (SCTP_STATE_CLOSED == asoc->state) {
                 err = sctp_primitive_ASSOCIATE(asoc, NULL);
                 if (err < 0)
@@ -1034,18 +1008,22 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                 SCTP_DEBUG_PRINTK("We associated primitively.\n");
         }
 
-        /* Send it to the lower layers. */
-        err = sctp_primitive_SEND(asoc, chunk);
+        /* Now send the (possibly) fragmented message. */
+        while ((chunk = (sctp_chunk_t *)__skb_dequeue(&chunks))) {
+                /* Do accounting for the write space. */
+                sctp_set_owner_w(chunk);
+
+                /* Send it to the lower layers. */
+                sctp_primitive_SEND(asoc, chunk);
+
+                SCTP_DEBUG_PRINTK("We sent primitively.\n");
+        }
 
         /* BUG: SCTP_CHECK_TIMER(sk); */
         if (!err) {
                 err = msg_len;
                 goto out_unlock;
         }
         /* If we are already past ASSOCIATE, the lower
-         * layers are responsible for its cleanup.
+         * layers are responsible for association cleanup.
          */
         goto out_free_chunk;