Commit 1ac6fcb7 authored by Stephen Lord

Merge kernel.bkbits.net:/home/repos/linux-2.5

into kernel.bkbits.net:/home/lord/xfs-2.6
parents 4b2b7a67 5e2995a5
......@@ -14,6 +14,9 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/spitfire.h>
static struct vm_struct * modvmlist = NULL;
static void module_unmap(void * addr)
......@@ -279,6 +282,16 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
/* Cheetah's I-cache is fully coherent. */
if (tlb_type == spitfire) {
unsigned long va;
flushw_all();
for (va = 0; va < (PAGE_SIZE << 1); va += 32)
spitfire_put_icache_tag(va, 0x0);
__asm__ __volatile__("flush %g6");
}
return 0;
}
......
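A note on the flush added to module_finalize(): assuming the usual sparc64 8 KB PAGE_SIZE, the loop bound (PAGE_SIZE << 1) works out to 2 * 8192 = 16384 bytes, i.e. Spitfire's 16 KB instruction cache, swept in 32-byte tag steps (512 stores). The assembler routine xcall_flush_cache_all_spitfire removed further down walks the same range, using the literal 16383 as an inclusive bound.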
......@@ -118,7 +118,6 @@ void __init smp_callin(void)
inherit_locked_prom_mappings(0);
__flush_cache_all();
__flush_tlb_all();
smp_setup_percpu_timer();
......@@ -661,7 +660,6 @@ extern unsigned long xcall_flush_tlb_range;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_flush_cache_all_spitfire;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_flush_dcache_page_cheetah;
......@@ -776,15 +774,6 @@ void smp_report_regs(void)
smp_cross_call(&xcall_report_regs, 0, 0, 0);
}
void smp_flush_cache_all(void)
{
/* Cheetah need do nothing. */
if (tlb_type == spitfire) {
smp_cross_call(&xcall_flush_cache_all_spitfire, 0, 0, 0);
__flush_cache_all();
}
}
void smp_flush_tlb_all(void)
{
if (tlb_type == spitfire)
......
......@@ -1025,19 +1025,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
}
}
void __flush_cache_all(void)
{
/* Cheetah should be fine here too. */
if (tlb_type == spitfire) {
unsigned long va;
flushw_all();
for (va = 0; va < (PAGE_SIZE << 1); va += 32)
spitfire_put_icache_tag(va, 0x0);
__asm__ __volatile__("flush %g6");
}
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
......
......@@ -721,20 +721,6 @@ xcall_flush_tlb_all_cheetah:
stxa %g0, [%g2] ASI_IMMU_DEMAP
retry
.globl xcall_flush_cache_all_spitfire
xcall_flush_cache_all_spitfire:
sethi %hi(16383), %g2
or %g2, %lo(16383), %g2
clr %g3
1: stxa %g0, [%g3] ASI_IC_TAG
membar #Sync
add %g3, 32, %g3
cmp %g3, %g2
bleu,pt %xcc, 1b
nop
flush %g6
retry
/* These just get rescheduled to PIL vectors. */
.globl xcall_call_function
xcall_call_function:
......
......@@ -169,7 +169,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
}
/* Create the ELF interpreter info */
elf_info = current->mm->saved_auxv;
elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
......@@ -197,8 +197,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
if (k_platform) {
NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
}
NEW_AUX_ENT(AT_NULL, 0);
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
memset(&elf_info[ei_index], 0,
sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
/* And advance past the AT_NULL entry. */
ei_index += 2;
sp = STACK_ADD(p, ei_index);
......@@ -1209,6 +1214,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
elf_fpxregset_t *xfpu = NULL;
#endif
int thread_status_size = 0;
elf_addr_t *auxv;
/*
* We no longer stop all VM operations.
......@@ -1290,13 +1296,14 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
numnote = 3;
auxv = (elf_addr_t *) current->mm->saved_auxv;
i = 0;
do
i += 2;
while (current->mm->saved_auxv[i - 2] != AT_NULL);
while (auxv[i - 2] != AT_NULL);
fill_note(&notes[numnote++], "CORE", NT_AUXV,
i * sizeof current->mm->saved_auxv[0],
current->mm->saved_auxv);
i * sizeof (elf_addr_t), auxv);
/* Try to dump the FPU. */
if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
......
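The elf_core_dump() change above sizes the NT_AUXV note by stepping through saved_auxv in (id, value) pairs until it passes the AT_NULL terminator. A minimal standalone sketch of that walk (count_auxv_slots is a hypothetical name, not part of the patch):

static size_t count_auxv_slots(const elf_addr_t *auxv)
{
	size_t i = 0;

	/* Each entry is an (id, value) pair; the terminating entry has id AT_NULL. */
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);

	return i;	/* elf_addr_t slots used, AT_NULL pair included */
}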
......@@ -33,18 +33,6 @@ extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
extern void __flush_dcache_range(unsigned long start, unsigned long end);
extern void __flush_cache_all(void);
#ifndef CONFIG_SMP
#define flush_cache_all() __flush_cache_all()
#else /* CONFIG_SMP */
extern void smp_flush_cache_all(void);
#endif /* ! CONFIG_SMP */
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
......@@ -55,7 +43,7 @@ extern void smp_flush_cache_all(void);
extern void flush_dcache_page(struct page *page);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#endif /* _SPARC64_CACHEFLUSH_H */
......@@ -70,7 +70,6 @@ extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
#define flush_cache_all() smp_flush_cache_all()
#define flush_tlb_all() smp_flush_tlb_all()
#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#define flush_tlb_range(vma, start, end) \
......
......@@ -62,6 +62,14 @@ struct ip_conntrack_tuple
} dst;
};
/* This is optimized as opposed to a memset of the whole structure. Everything we
* really care about is the source/destination unions */
#define IP_CT_TUPLE_U_BLANK(tuple) \
do { \
(tuple)->src.u.all = 0; \
(tuple)->dst.u.all = 0; \
} while (0)
enum ip_conntrack_dir
{
IP_CT_DIR_ORIGINAL,
......
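IP_CT_TUPLE_U_BLANK() clears only the src/dst port unions, so a caller is still expected to fill in the addresses and protocol number afterwards; the getorigdst() conversion later in this patch shows the intended pattern:

	struct ip_conntrack_tuple tuple;

	IP_CT_TUPLE_U_BLANK(&tuple);
	tuple.src.ip = inet->rcv_saddr;
	tuple.src.u.tcp.port = inet->sport;
	tuple.dst.ip = inet->daddr;
	tuple.dst.u.tcp.port = inet->dport;
	tuple.dst.protonum = IPPROTO_TCP;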
......@@ -14,5 +14,10 @@ extern int ip_nat_rule_find(struct sk_buff **pskb,
const struct net_device *out,
struct ip_conntrack *ct,
struct ip_nat_info *info);
extern unsigned int
alloc_null_binding(struct ip_conntrack *conntrack,
struct ip_nat_info *info,
unsigned int hooknum);
#endif
#endif /* _IP_NAT_RULE_H */
......@@ -524,113 +524,4 @@ typedef struct sctp_addip_chunk {
sctp_addiphdr_t addip_hdr;
} sctp_addip_chunk_t __attribute__((packed));
/* FIXME: Cleanup needs to continue below this line. */
/* ADDIP Section 3.1.1
*
* ASCONF-Request Correlation ID: 32 bits (unsigned integer)
*
* This is an opaque integer assigned by the sender to identify each
* request parameter. It is in host byte order and is only meaningful
* to the sender. The receiver of the ASCONF Chunk will copy this 32
* bit value into the ASCONF Correlation ID field of the
* ASCONF-ACK. The sender of the ASCONF can use this same value in the
* ASCONF-ACK to find which request the response is for.
*
* ASCONF Parameter: TLV format
*
* Each Address configuration change is represented by a TLV parameter
* as defined in Section 3.2. One or more requests may be present in
* an ASCONF Chunk.
*/
typedef struct {
__u32 correlation;
sctp_paramhdr_t p;
__u8 payload[0];
} sctpAsconfReq_t;
/* ADDIP
* 3.1.1 Address/Stream Configuration Change Chunk (ASCONF)
*
* This chunk is used to communicate to the remote endpoint one of the
* configuration change requests that MUST be acknowledged. The
* information carried in the ASCONF Chunk uses the form of a
* Tag-Length-Value (TLV), as described in "3.2.1
* Optional/Variable-length Parameter Format" in [RFC2960], for all
* variable parameters.
*/
typedef struct {
__u32 serial;
__u8 reserved[3];
__u8 addr_type;
__u32 addr[4];
sctpAsconfReq_t requests[0];
} sctpAsconf_t;
/* ADDIP
* 3.1.2 Address/Stream Configuration Acknowledgment Chunk (ASCONF-ACK)
*
* ASCONF-Request Correlation ID: 32 bits (unsigned integer)
*
* This value is copied from the ASCONF Correlation ID received in the
* ASCONF Chunk. It is used by the receiver of the ASCONF-ACK to identify
* which ASCONF parameter this response is associated with.
*
* ASCONF Parameter Response : TLV format
*
* The ASCONF Parameter Response is used in the ASCONF-ACK to report
* status of ASCONF processing. By default, if a responding endpoint
* does not include any Error Cause, a success is indicated. Thus a
* sender of an ASCONF-ACK MAY indicate complete success of all TLVs in
* an ASCONF by returning only the Chunk Type, Chunk Flags, Chunk Length
* (set to 8) and the Serial Number.
*/
typedef union {
struct {
__u32 correlation;
sctp_paramhdr_t header; /* success report */
} success;
struct {
__u32 correlation;
sctp_paramhdr_t header; /* error cause indication */
sctp_paramhdr_t errcause;
uint8_t request[0]; /* original request from ASCONF */
} error;
#define __correlation success.correlation
#define __header success.header
#define __cause error.errcause
#define __request error.request
} sctpAsconfAckRsp_t;
/* ADDIP
* 3.1.2 Address/Stream Configuration Acknowledgment Chunk (ASCONF-ACK)
*
* This chunk is used by the receiver of an ASCONF Chunk to
* acknowledge the reception. It carries zero or more results for any
* ASCONF Parameters that were processed by the receiver.
*/
typedef struct {
__u32 serial;
sctpAsconfAckRsp_t responses[0];
} sctpAsconfAck_t;
/*********************************************************************
* Internal structures
*
* These are data structures which never go out on the wire.
*********************************************************************/
/* What is this data structure for? The TLV isn't one--it is just a
* value. Perhaps this data structure ought to have a type--otherwise
* it is not unambigiously parseable. --piggy
*/
typedef struct {
struct list_head hook;
int length; /* length of the TLV */
/* the actually TLV to be copied into ASCONF_ACK */
sctpAsconfAckRsp_t TLV;
} sctpAsconfAckRspNode_t;
#endif /* __LINUX_SCTP_H__ */
......@@ -885,7 +885,7 @@ static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
*/
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
return (len > skb->len) ? NULL : __skb_pull(skb, len);
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
......@@ -901,7 +901,7 @@ static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
return (len > skb->len) ? NULL : __pskb_pull(skb, len);
return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}
static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
......@@ -1052,7 +1052,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
int gfp_mask)
{
struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
if (skb)
if (likely(skb))
skb_reserve(skb, 16);
return skb;
}
......
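The likely()/unlikely() annotations added above are the standard branch-prediction hints from include/linux/compiler.h; on gcc they expand roughly as follows (sketch; the exact definition in this tree may differ slightly):

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)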
......@@ -244,6 +244,7 @@ enum
NET_IPV4_NEIGH=17,
NET_IPV4_ROUTE=18,
NET_IPV4_FIB_HASH=19,
NET_IPV4_NETFILTER=20,
NET_IPV4_TCP_TIMESTAMPS=33,
NET_IPV4_TCP_WINDOW_SCALING=34,
......@@ -358,6 +359,24 @@ enum
NET_IPV4_CONF_NOPOLICY=16,
};
/* /proc/sys/net/ipv4/netfilter */
enum
{
NET_IPV4_NF_CONNTRACK_MAX=1,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
};
/* /proc/sys/net/ipv6 */
enum {
NET_IPV6_CONF=16,
......
/* SCTP kernel reference Implementation Copyright (C) 1999-2001
* Cisco, Motorola, and IBM
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2003
* Copyright (C) 1999-2001 Cisco, Motorola
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -88,6 +89,7 @@ typedef enum {
SCTP_CMD_PART_DELIVER, /* Partial data delivery considerations. */
SCTP_CMD_RENEGE, /* Renege data on an association. */
SCTP_CMD_SETUP_T4, /* ADDIP, setup T4 RTO timer parms. */
SCTP_CMD_PROCESS_OPERR, /* Process an ERROR chunk. */
SCTP_CMD_LAST
} sctp_verb_t;
......
......@@ -116,6 +116,9 @@
#define SCTP_STATIC static
#endif
#define MSECS_TO_JIFFIES(msec) (msec * HZ / 1000)
#define JIFFIES_TO_MSECS(jiff) (jiff * 1000 / HZ)
/*
* Function declarations.
*/
......@@ -495,22 +498,19 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
#define tv_lt(s, t) \
(s.tv_sec < t.tv_sec || (s.tv_sec == t.tv_sec && s.tv_usec < t.tv_usec))
/* Stolen from net/profile.h. Using it from there is more grief than
* it is worth.
*/
static inline void tv_add(const struct timeval *entered, struct timeval *leaved)
{
time_t usecs = leaved->tv_usec + entered->tv_usec;
time_t secs = leaved->tv_sec + entered->tv_sec;
if (usecs >= 1000000) {
usecs -= 1000000;
secs++;
}
leaved->tv_sec = secs;
leaved->tv_usec = usecs;
}
/* Add tv1 to tv2. */
#define TIMEVAL_ADD(tv1, tv2) \
({ \
suseconds_t usecs = (tv2).tv_usec + (tv1).tv_usec; \
time_t secs = (tv2).tv_sec + (tv1).tv_sec; \
\
if (usecs >= 1000000) { \
usecs -= 1000000; \
secs++; \
} \
(tv2).tv_sec = secs; \
(tv2).tv_usec = usecs; \
})
/* External references. */
......
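TIMEVAL_ADD() adds tv1 into tv2 in place, replacing the old tv_add() helper, while MSECS_TO_JIFFIES()/JIFFIES_TO_MSECS() are plain scaling macros. A minimal usage sketch mirroring the sctp_pack_cookie() call further down in this patch; the HZ value is assumed only for the arithmetic:

	struct timeval expires;

	do_gettimeofday(&expires);
	TIMEVAL_ADD(asoc->cookie_life, expires);	/* expires = now + cookie lifetime */

	/* With HZ == 100: MSECS_TO_JIFFIES(1500) == 150 and JIFFIES_TO_MSECS(150) == 1500. */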
......@@ -265,13 +265,19 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *,
struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
union sctp_addr *addr,
int vparam_len);
struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
union sctp_addr *,
struct sockaddr *,
int, int);
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
union sctp_addr *addr);
struct sctp_chunk *sctp_make_asconf_ack(struct sctp_association *asoc,
int serial, int vparam_len);
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf,
int vparam_len);
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
union sctp_addr *addr);
void sctp_chunk_assign_tsn(struct sctp_chunk *);
void sctp_chunk_assign_ssn(struct sctp_chunk *);
......
......@@ -1085,6 +1085,10 @@ int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_opt *);
union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
const union sctp_addr *addrs,
int addrcnt,
struct sctp_opt *opt);
union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
int *addrs_len, int gfp);
int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
......@@ -1389,6 +1393,10 @@ struct sctp_association {
__u8 ipv4_address; /* Peer understands IPv4 addresses? */
__u8 ipv6_address; /* Peer understands IPv6 addresses? */
__u8 hostname_address;/* Peer understands DNS addresses? */
/* Does peer support ADDIP? */
__u8 asconf_capable;
struct sctp_inithdr i;
int cookie_len;
void *cookie;
......
......@@ -1488,6 +1488,18 @@ static void net_tx_action(struct softirq_action *h)
}
}
static __inline__ int deliver_skb(struct sk_buff *skb,
struct packet_type *pt_prev, int last)
{
if (unlikely(!pt_prev->data))
return deliver_to_old_ones(pt_prev, skb, last);
else {
atomic_inc(&skb->users);
return pt_prev->func(skb, skb->dev, pt_prev);
}
}
#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
int (*br_handle_frame_hook)(struct sk_buff *skb);
......@@ -1495,15 +1507,8 @@ static __inline__ int handle_bridge(struct sk_buff *skb,
struct packet_type *pt_prev)
{
int ret = NET_RX_DROP;
if (pt_prev) {
if (!pt_prev->data)
ret = deliver_to_old_ones(pt_prev, skb, 0);
else {
atomic_inc(&skb->users);
ret = pt_prev->func(skb, skb->dev, pt_prev);
}
}
if (pt_prev)
ret = deliver_skb(skb, pt_prev, 0);
return ret;
}
......@@ -1551,16 +1556,8 @@ int netif_receive_skb(struct sk_buff *skb)
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
if (!ptype->dev || ptype->dev == skb->dev) {
if (pt_prev) {
if (!pt_prev->data) {
ret = deliver_to_old_ones(pt_prev,
skb, 0);
} else {
atomic_inc(&skb->users);
ret = pt_prev->func(skb, skb->dev,
pt_prev);
}
}
if (pt_prev)
ret = deliver_skb(skb, pt_prev, 0);
pt_prev = ptype;
}
}
......@@ -1573,27 +1570,15 @@ int netif_receive_skb(struct sk_buff *skb)
list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
if (ptype->type == type &&
(!ptype->dev || ptype->dev == skb->dev)) {
if (pt_prev) {
if (!pt_prev->data) {
ret = deliver_to_old_ones(pt_prev,
skb, 0);
} else {
atomic_inc(&skb->users);
ret = pt_prev->func(skb, skb->dev,
pt_prev);
}
}
if (pt_prev)
ret = deliver_skb(skb, pt_prev, 0);
pt_prev = ptype;
}
}
if (pt_prev) {
if (!pt_prev->data) {
ret = deliver_to_old_ones(pt_prev, skb, 1);
} else {
ret = pt_prev->func(skb, skb->dev, pt_prev);
}
} else {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, 1);
else {
kfree_skb(skb);
/* Jamal, now you will not be able to escape explaining
* to me how you were going to use this. :-)
......
......@@ -59,7 +59,7 @@ LIST_HEAD(ip_conntrack_expect_list);
LIST_HEAD(protocol_list);
static LIST_HEAD(helpers);
unsigned int ip_conntrack_htable_size = 0;
static int ip_conntrack_max;
int ip_conntrack_max;
static atomic_t ip_conntrack_count = ATOMIC_INIT(0);
struct list_head *ip_conntrack_hash;
static kmem_cache_t *ip_conntrack_cachep;
......@@ -301,7 +301,7 @@ clean_from_lists(struct ip_conntrack *ct)
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
struct ip_conntrack *ct = (struct ip_conntrack *)nfct, *master = NULL;
struct ip_conntrack_protocol *proto;
DEBUGP("destroy_conntrack(%p)\n", ct);
......@@ -328,12 +328,15 @@ destroy_conntrack(struct nf_conntrack *nfct)
/* can't call __unexpect_related here,
* since it would screw up expect_list */
list_del(&ct->master->expected_list);
ip_conntrack_put(ct->master->expectant);
master = ct->master->expectant;
}
kfree(ct->master);
}
WRITE_UNLOCK(&ip_conntrack_lock);
if (master)
ip_conntrack_put(master);
DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
kmem_cache_free(ip_conntrack_cachep, ct);
atomic_dec(&ip_conntrack_count);
......@@ -1276,11 +1279,14 @@ getorigdst(struct sock *sk, int optval, void *user, int *len)
{
struct inet_opt *inet = inet_sk(sk);
struct ip_conntrack_tuple_hash *h;
struct ip_conntrack_tuple tuple = { { inet->rcv_saddr,
{ .tcp = { inet->sport } } },
{ inet->daddr,
{ .tcp = { inet->dport } },
IPPROTO_TCP } };
struct ip_conntrack_tuple tuple;
IP_CT_TUPLE_U_BLANK(&tuple);
tuple.src.ip = inet->rcv_saddr;
tuple.src.u.tcp.port = inet->sport;
tuple.dst.ip = inet->daddr;
tuple.dst.u.tcp.port = inet->dport;
tuple.dst.protonum = IPPROTO_TCP;
/* We only do TCP at the moment: is there a better way? */
if (strcmp(sk->sk_prot->name, "TCP")) {
......@@ -1325,45 +1331,6 @@ static struct nf_sockopt_ops so_getorigdst = {
.get = &getorigdst,
};
#define NET_IP_CONNTRACK_MAX 2089
#define NET_IP_CONNTRACK_MAX_NAME "ip_conntrack_max"
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *ip_conntrack_sysctl_header;
static ctl_table ip_conntrack_table[] = {
{
.ctl_name = NET_IP_CONNTRACK_MAX,
.procname = NET_IP_CONNTRACK_MAX_NAME,
.data = &ip_conntrack_max,
.maxlen = sizeof(ip_conntrack_max),
.mode = 0644,
.proc_handler = proc_dointvec
},
{ .ctl_name = 0 }
};
static ctl_table ip_conntrack_dir_table[] = {
{
.ctl_name = NET_IPV4,
.procname = "ipv4",
.mode = 0555,
.child = ip_conntrack_table
},
{ .ctl_name = 0 }
};
static ctl_table ip_conntrack_root_table[] = {
{
.ctl_name = CTL_NET,
.procname = "net",
.mode = 0555,
.child = ip_conntrack_dir_table
},
{ .ctl_name = 0 }
};
#endif /*CONFIG_SYSCTL*/
static int kill_all(const struct ip_conntrack *i, void *data)
{
return 1;
......@@ -1373,9 +1340,6 @@ static int kill_all(const struct ip_conntrack *i, void *data)
supposed to kill the mall. */
void ip_conntrack_cleanup(void)
{
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(ip_conntrack_sysctl_header);
#endif
ip_ct_attach = NULL;
/* This makes sure all current packets have passed through
netfilter framework. Roll on, two-stage module
......@@ -1453,25 +1417,10 @@ int __init ip_conntrack_init(void)
for (i = 0; i < ip_conntrack_htable_size; i++)
INIT_LIST_HEAD(&ip_conntrack_hash[i]);
/* This is fucking braindead. There is NO WAY of doing this without
the CONFIG_SYSCTL unless you don't want to detect errors.
Grrr... --RR */
#ifdef CONFIG_SYSCTL
ip_conntrack_sysctl_header
= register_sysctl_table(ip_conntrack_root_table, 0);
if (ip_conntrack_sysctl_header == NULL) {
goto err_free_ct_cachep;
}
#endif /*CONFIG_SYSCTL*/
/* For use by ipt_REJECT */
ip_ct_attach = ip_conntrack_attach;
return ret;
#ifdef CONFIG_SYSCTL
err_free_ct_cachep:
kmem_cache_destroy(ip_conntrack_cachep);
#endif /*CONFIG_SYSCTL*/
err_free_hash:
vfree(ip_conntrack_hash);
err_unreg_sockopt:
......
......@@ -177,7 +177,10 @@ static int help(struct sk_buff *skb,
DEBUGP("DCC bound ip/port: %u.%u.%u.%u:%u\n",
HIPQUAD(dcc_ip), dcc_port);
if (ct->tuplehash[dir].tuple.src.ip != htonl(dcc_ip)) {
/* dcc_ip can be the internal OR external (NAT'ed) IP
* Tiago Sousa <mirage@kaotik.org> */
if (ct->tuplehash[dir].tuple.src.ip != htonl(dcc_ip)
&& ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip != htonl(dcc_ip)) {
if (net_ratelimit())
printk(KERN_WARNING
"Forged DCC command from "
......@@ -201,7 +204,7 @@ static int help(struct sk_buff *skb,
exp->tuple = ((struct ip_conntrack_tuple)
{ { 0, { 0 } },
{ htonl(dcc_ip), { .tcp = { htons(dcc_port) } },
{ ct->tuplehash[dir].tuple.src.ip, { .tcp = { htons(dcc_port) } },
IPPROTO_TCP }});
exp->mask = ((struct ip_conntrack_tuple)
{ { 0, { 0 } },
......
......@@ -4,7 +4,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
#define GENERIC_TIMEOUT (600*HZ)
unsigned long ip_ct_generic_timeout = 600*HZ;
static int generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
......@@ -44,7 +44,7 @@ static int packet(struct ip_conntrack *conntrack,
const struct sk_buff *skb,
enum ip_conntrack_info conntrackinfo)
{
ip_ct_refresh(conntrack, GENERIC_TIMEOUT);
ip_ct_refresh(conntrack, ip_ct_generic_timeout);
return NF_ACCEPT;
}
......
......@@ -6,7 +6,7 @@
#include <linux/icmp.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
#define ICMP_TIMEOUT (30*HZ)
unsigned long ip_ct_icmp_timeout = 30*HZ;
#if 0
#define DEBUGP printk
......@@ -86,7 +86,7 @@ static int icmp_packet(struct ip_conntrack *ct,
ct->timeout.function((unsigned long)ct);
} else {
atomic_inc(&ct->proto.icmp.count);
ip_ct_refresh(ct, ICMP_TIMEOUT);
ip_ct_refresh(ct, ip_ct_icmp_timeout);
}
return NF_ACCEPT;
......
......@@ -49,19 +49,27 @@ static const char *tcp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
static unsigned long tcp_timeouts[]
= { 30 MINS, /* TCP_CONNTRACK_NONE, */
5 DAYS, /* TCP_CONNTRACK_ESTABLISHED, */
2 MINS, /* TCP_CONNTRACK_SYN_SENT, */
60 SECS, /* TCP_CONNTRACK_SYN_RECV, */
2 MINS, /* TCP_CONNTRACK_FIN_WAIT, */
2 MINS, /* TCP_CONNTRACK_TIME_WAIT, */
10 SECS, /* TCP_CONNTRACK_CLOSE, */
60 SECS, /* TCP_CONNTRACK_CLOSE_WAIT, */
30 SECS, /* TCP_CONNTRACK_LAST_ACK, */
2 MINS, /* TCP_CONNTRACK_LISTEN, */
};
unsigned long ip_ct_tcp_timeout_syn_sent = 2 MINS;
unsigned long ip_ct_tcp_timeout_syn_recv = 60 SECS;
unsigned long ip_ct_tcp_timeout_established = 5 DAYS;
unsigned long ip_ct_tcp_timeout_fin_wait = 2 MINS;
unsigned long ip_ct_tcp_timeout_close_wait = 3 DAYS;
unsigned long ip_ct_tcp_timeout_last_ack = 30 SECS;
unsigned long ip_ct_tcp_timeout_time_wait = 2 MINS;
unsigned long ip_ct_tcp_timeout_close = 10 SECS;
static unsigned long * tcp_timeouts[]
= { 0, /* TCP_CONNTRACK_NONE */
&ip_ct_tcp_timeout_established, /* TCP_CONNTRACK_ESTABLISHED, */
&ip_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */
&ip_ct_tcp_timeout_syn_recv, /* TCP_CONNTRACK_SYN_RECV, */
&ip_ct_tcp_timeout_fin_wait, /* TCP_CONNTRACK_FIN_WAIT, */
&ip_ct_tcp_timeout_time_wait, /* TCP_CONNTRACK_TIME_WAIT, */
&ip_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */
&ip_ct_tcp_timeout_close_wait, /* TCP_CONNTRACK_CLOSE_WAIT, */
&ip_ct_tcp_timeout_last_ack, /* TCP_CONNTRACK_LAST_ACK, */
0, /* TCP_CONNTRACK_LISTEN */
};
#define sNO TCP_CONNTRACK_NONE
#define sES TCP_CONNTRACK_ESTABLISHED
......@@ -204,7 +212,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
set_bit(IPS_ASSURED_BIT, &conntrack->status);
WRITE_UNLOCK(&tcp_lock);
ip_ct_refresh(conntrack, tcp_timeouts[newconntrack]);
ip_ct_refresh(conntrack, *tcp_timeouts[newconntrack]);
}
return NF_ACCEPT;
......
......@@ -6,8 +6,8 @@
#include <linux/udp.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
#define UDP_TIMEOUT (30*HZ)
#define UDP_STREAM_TIMEOUT (180*HZ)
unsigned long ip_ct_udp_timeout = 30*HZ;
unsigned long ip_ct_udp_timeout_stream = 180*HZ;
static int udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
......@@ -57,11 +57,11 @@ static int udp_packet(struct ip_conntrack *conntrack,
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
ip_ct_refresh(conntrack, UDP_STREAM_TIMEOUT);
ip_ct_refresh(conntrack, ip_ct_udp_timeout_stream);
/* Also, more likely to be important, and not a probe */
set_bit(IPS_ASSURED_BIT, &conntrack->status);
} else
ip_ct_refresh(conntrack, UDP_TIMEOUT);
ip_ct_refresh(conntrack, ip_ct_udp_timeout);
return NF_ACCEPT;
}
......
......@@ -7,6 +7,7 @@
/* (c) 1999 Paul `Rusty' Russell. Licenced under the GNU General
Public Licence. */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
......@@ -14,6 +15,9 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/checksum.h>
#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_conntrack_lock)
......@@ -256,6 +260,98 @@ static struct nf_hook_ops ip_conntrack_local_in_ops = {
.priority = NF_IP_PRI_LAST-1,
};
/* Sysctl support */
#ifdef CONFIG_SYSCTL
/* From ip_conntrack_core.c */
extern int ip_conntrack_max;
/* From ip_conntrack_proto_tcp.c */
extern unsigned long ip_ct_tcp_timeout_syn_sent;
extern unsigned long ip_ct_tcp_timeout_syn_recv;
extern unsigned long ip_ct_tcp_timeout_established;
extern unsigned long ip_ct_tcp_timeout_fin_wait;
extern unsigned long ip_ct_tcp_timeout_close_wait;
extern unsigned long ip_ct_tcp_timeout_last_ack;
extern unsigned long ip_ct_tcp_timeout_time_wait;
extern unsigned long ip_ct_tcp_timeout_close;
/* From ip_conntrack_proto_udp.c */
extern unsigned long ip_ct_udp_timeout;
extern unsigned long ip_ct_udp_timeout_stream;
/* From ip_conntrack_proto_icmp.c */
extern unsigned long ip_ct_icmp_timeout;
/* From ip_conntrack_proto_icmp.c */
extern unsigned long ip_ct_generic_timeout;
static struct ctl_table_header *ip_ct_sysctl_header;
static ctl_table ip_ct_sysctl_table[] = {
{NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max",
&ip_conntrack_max, sizeof(int), 0644, NULL,
&proc_dointvec},
{NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent",
&ip_ct_tcp_timeout_syn_sent, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "ip_conntrack_tcp_timeout_syn_recv",
&ip_ct_tcp_timeout_syn_recv, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "ip_conntrack_tcp_timeout_established",
&ip_ct_tcp_timeout_established, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "ip_conntrack_tcp_timeout_fin_wait",
&ip_ct_tcp_timeout_fin_wait, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "ip_conntrack_tcp_timeout_close_wait",
&ip_ct_tcp_timeout_close_wait, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "ip_conntrack_tcp_timeout_last_ack",
&ip_ct_tcp_timeout_last_ack, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "ip_conntrack_tcp_timeout_time_wait",
&ip_ct_tcp_timeout_time_wait, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "ip_conntrack_tcp_timeout_close",
&ip_ct_tcp_timeout_close, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, "ip_conntrack_udp_timeout",
&ip_ct_udp_timeout, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "ip_conntrack_udp_timeout_stream",
&ip_ct_udp_timeout_stream, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, "ip_conntrack_icmp_timeout",
&ip_ct_icmp_timeout, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, "ip_conntrack_generic_timeout",
&ip_ct_generic_timeout, sizeof(unsigned int), 0644, NULL,
&proc_dointvec_jiffies},
{0}
};
#define NET_IP_CONNTRACK_MAX 2089
static ctl_table ip_ct_netfilter_table[] = {
{NET_IPV4_NETFILTER, "netfilter", NULL, 0, 0555, ip_ct_sysctl_table, 0, 0, 0, 0, 0},
{NET_IP_CONNTRACK_MAX, "ip_conntrack_max",
&ip_conntrack_max, sizeof(int), 0644, NULL,
&proc_dointvec},
{0}
};
static ctl_table ip_ct_ipv4_table[] = {
{NET_IPV4, "ipv4", NULL, 0, 0555, ip_ct_netfilter_table, 0, 0, 0, 0, 0},
{0}
};
static ctl_table ip_ct_net_table[] = {
{CTL_NET, "net", NULL, 0, 0555, ip_ct_ipv4_table, 0, 0, 0, 0, 0},
{0}
};
#endif
static int init_or_cleanup(int init)
{
struct proc_dir_entry *proc;
......@@ -291,10 +387,20 @@ static int init_or_cleanup(int init)
printk("ip_conntrack: can't register local in hook.\n");
goto cleanup_inoutandlocalops;
}
#ifdef CONFIG_SYSCTL
ip_ct_sysctl_header = register_sysctl_table(ip_ct_net_table, 0);
if (ip_ct_sysctl_header == NULL) {
printk("ip_conntrack: can't register to sysctl.\n");
goto cleanup;
}
#endif
return ret;
cleanup:
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(ip_ct_sysctl_header);
#endif
nf_unregister_hook(&ip_conntrack_local_in_ops);
cleanup_inoutandlocalops:
nf_unregister_hook(&ip_conntrack_out_ops);
......
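The new ctl_table entries above use positional initializers; assuming the 2.6 struct ctl_table field order (ctl_name, procname, data, maxlen, mode, child, proc_handler, ...), which matches the designated initializers of the table removed from ip_conntrack_core.c, the first entry is equivalent to this sketch (not part of the patch). The timeout entries differ only in pointing .data at the ip_ct_*_timeout variables and using proc_dointvec_jiffies:

static ctl_table ip_ct_sysctl_table[] = {
	{
		.ctl_name	= NET_IPV4_NF_CONNTRACK_MAX,
		.procname	= "ip_conntrack_max",
		.data		= &ip_conntrack_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	/* ... remaining entries follow the same shape ... */
	{ .ctl_name = 0 }
};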
......@@ -516,12 +516,14 @@ ip_nat_setup_info(struct ip_conntrack *conntrack,
struct ip_conntrack_tuple new_tuple, inv_tuple, reply;
struct ip_conntrack_tuple orig_tp;
struct ip_nat_info *info = &conntrack->nat.info;
int in_hashes = info->initialized;
MUST_BE_WRITE_LOCKED(&ip_nat_lock);
IP_NF_ASSERT(hooknum == NF_IP_PRE_ROUTING
|| hooknum == NF_IP_POST_ROUTING
|| hooknum == NF_IP_LOCAL_OUT);
IP_NF_ASSERT(info->num_manips < IP_NAT_MAX_MANIPS);
IP_NF_ASSERT(!(info->initialized & (1 << HOOK2MANIP(hooknum))));
/* What we've got will look like inverse of reply. Normally
this is what is in the conntrack, except for prior
......@@ -638,6 +640,14 @@ ip_nat_setup_info(struct ip_conntrack *conntrack,
/* It's done. */
info->initialized |= (1 << HOOK2MANIP(hooknum));
if (in_hashes) {
IP_NF_ASSERT(info->bysource.conntrack);
replace_in_hashes(conntrack, info);
} else {
place_in_hashes(conntrack, info);
}
return NF_ACCEPT;
}
......@@ -761,11 +771,6 @@ do_bindings(struct ip_conntrack *ct,
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
int proto = (*pskb)->nh.iph->protocol;
/* Skip everything and don't call helpers if there are no
* manips for this connection */
if (info->num_manips == 0)
return NF_ACCEPT;
/* Need nat lock to protect against modification, but neither
conntrack (referenced) and helper (deleted with
synchronize_bh()) can vanish. */
......
......@@ -122,7 +122,6 @@ static void mangle_contents(struct sk_buff *skb,
/* fix IP hdr checksum information */
skb->nh.iph->tot_len = htons(skb->len);
ip_send_check(skb->nh.iph);
skb->csum = csum_partial(data, skb->len - dataoff, 0);
}
/* Unusual, but possible case. */
......@@ -167,6 +166,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
{
struct iphdr *iph;
struct tcphdr *tcph;
int datalen;
if (!skb_ip_make_writable(pskb, (*pskb)->len))
return 0;
......@@ -184,11 +184,11 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4,
match_offset, match_len, rep_buffer, rep_len);
datalen = (*pskb)->len - iph->ihl*4;
tcph->check = 0;
tcph->check = tcp_v4_check(tcph, (*pskb)->len - iph->ihl*4,
iph->saddr, iph->daddr,
csum_partial((char *)tcph, tcph->doff*4,
(*pskb)->csum));
tcph->check = tcp_v4_check(tcph, datalen, iph->saddr, iph->daddr,
csum_partial((char *)tcph, datalen, 0));
adjust_tcp_sequence(ntohl(tcph->seq),
(int)rep_len - (int)match_len,
ct, ctinfo);
......@@ -216,7 +216,12 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
{
struct iphdr *iph;
struct udphdr *udph;
int need_csum = ((*pskb)->csum != 0);
/* UDP helpers might accidentally mangle the wrong packet */
iph = (*pskb)->nh.iph;
if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
match_offset + match_len)
return 0;
if (!skb_ip_make_writable(pskb, (*pskb)->len))
return 0;
......@@ -235,17 +240,15 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
udph->len = htons((*pskb)->len - iph->ihl*4);
/* fix udp checksum if udp checksum was previously calculated */
if (need_csum) {
if (udph->check) {
int datalen = (*pskb)->len - iph->ihl * 4;
udph->check = 0;
udph->check
= csum_tcpudp_magic(iph->saddr, iph->daddr,
(*pskb)->len - iph->ihl*4,
IPPROTO_UDP,
udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
datalen, IPPROTO_UDP,
csum_partial((char *)udph,
sizeof(struct udphdr),
(*pskb)->csum));
} else
(*pskb)->csum = 0;
datalen, 0));
}
return 1;
}
......
......@@ -233,7 +233,7 @@ static int ipt_dnat_checkentry(const char *tablename,
return 1;
}
static inline unsigned int
inline unsigned int
alloc_null_binding(struct ip_conntrack *conntrack,
struct ip_nat_info *info,
unsigned int hooknum)
......
......@@ -119,7 +119,6 @@ ip_nat_fn(unsigned int hooknum,
/* Seen it before? This can happen for loopback, retrans,
or local packets.. */
if (!(info->initialized & (1 << maniptype))) {
int in_hashes = info->initialized;
unsigned int ret;
if (ct->master
......@@ -130,9 +129,10 @@ ip_nat_fn(unsigned int hooknum,
} else {
#ifdef CONFIG_IP_NF_NAT_LOCAL
/* LOCAL_IN hook doesn't have a chain! */
if (hooknum == NF_IP_LOCAL_IN) {
ret = NF_ACCEPT;
} else
if (hooknum == NF_IP_LOCAL_IN)
ret = alloc_null_binding(ct, info,
hooknum);
else
#endif
ret = ip_nat_rule_find(pskb, hooknum, in, out,
ct, info);
......@@ -142,13 +142,6 @@ ip_nat_fn(unsigned int hooknum,
WRITE_UNLOCK(&ip_nat_lock);
return ret;
}
if (in_hashes) {
IP_NF_ASSERT(info->bysource.conntrack);
replace_in_hashes(ct, info);
} else {
place_in_hashes(ct, info);
}
} else
DEBUGP("Already setup manip %s for ct %p\n",
maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST",
......@@ -199,6 +192,7 @@ ip_nat_out(unsigned int hooknum,
return ip_nat_fn(hooknum, pskb, in, out, okfn);
}
#ifdef CONFIG_IP_NF_NAT_LOCAL
static unsigned int
ip_nat_local_fn(unsigned int hooknum,
struct sk_buff **pskb,
......@@ -224,6 +218,7 @@ ip_nat_local_fn(unsigned int hooknum,
return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
return ret;
}
#endif
/* We must be after connection tracking and before packet filtering. */
......@@ -245,6 +240,7 @@ static struct nf_hook_ops ip_nat_out_ops = {
.priority = NF_IP_PRI_NAT_SRC,
};
#ifdef CONFIG_IP_NF_NAT_LOCAL
/* Before packet filtering, change destination */
static struct nf_hook_ops ip_nat_local_out_ops = {
.hook = ip_nat_local_fn,
......@@ -254,7 +250,7 @@ static struct nf_hook_ops ip_nat_local_out_ops = {
.priority = NF_IP_PRI_NAT_DST,
};
#ifdef CONFIG_IP_NF_NAT_LOCAL
/* After packet filtering, change source for reply packets of LOCAL_OUT DNAT */
static struct nf_hook_ops ip_nat_local_in_ops = {
.hook = ip_nat_fn,
.owner = THIS_MODULE,
......@@ -324,12 +320,12 @@ static int init_or_cleanup(int init)
printk("ip_nat_init: can't register out hook.\n");
goto cleanup_inops;
}
#ifdef CONFIG_IP_NF_NAT_LOCAL
ret = nf_register_hook(&ip_nat_local_out_ops);
if (ret < 0) {
printk("ip_nat_init: can't register local out hook.\n");
goto cleanup_outops;
}
#ifdef CONFIG_IP_NF_NAT_LOCAL
ret = nf_register_hook(&ip_nat_local_in_ops);
if (ret < 0) {
printk("ip_nat_init: can't register local in hook.\n");
......@@ -342,9 +338,9 @@ static int init_or_cleanup(int init)
#ifdef CONFIG_IP_NF_NAT_LOCAL
nf_unregister_hook(&ip_nat_local_in_ops);
cleanup_localoutops:
#endif
nf_unregister_hook(&ip_nat_local_out_ops);
cleanup_outops:
#endif
nf_unregister_hook(&ip_nat_out_ops);
cleanup_inops:
nf_unregister_hook(&ip_nat_in_ops);
......
......@@ -40,15 +40,16 @@ static void connection_attach(struct sk_buff *new_skb, struct nf_ct_info *nfct)
}
}
static inline struct rtable *route_reverse(struct sk_buff *skb, int local)
static inline struct rtable *route_reverse(struct sk_buff *skb, int hook)
{
struct iphdr *iph = skb->nh.iph;
struct dst_entry *odst;
struct flowi fl = {};
struct rtable *rt;
if (local) {
if (hook != NF_IP_FORWARD) {
fl.nl_u.ip4_u.daddr = iph->saddr;
if (hook == NF_IP_LOCAL_IN)
fl.nl_u.ip4_u.saddr = iph->daddr;
fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
......@@ -81,7 +82,7 @@ static inline struct rtable *route_reverse(struct sk_buff *skb, int local)
}
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb, int local)
static void send_reset(struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
struct tcphdr otcph, *tcph;
......@@ -104,7 +105,7 @@ static void send_reset(struct sk_buff *oldskb, int local)
return;
/* FIXME: Check checksum --RR */
if ((rt = route_reverse(oldskb, local)) == NULL)
if ((rt = route_reverse(oldskb, hook)) == NULL)
return;
hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;
......@@ -390,7 +391,7 @@ static unsigned int reject(struct sk_buff **pskb,
send_unreach(*pskb, ICMP_PKT_FILTERED);
break;
case IPT_TCP_RESET:
send_reset(*pskb, hooknum == NF_IP_LOCAL_IN);
send_reset(*pskb, hooknum);
case IPT_ICMP_ECHOREPLY:
/* Doesn't happen. */
break;
......
......@@ -126,18 +126,11 @@ ipv6header_checkentry(const char *tablename,
return 1;
}
static void
ipv6header_destroy(void *matchinfo,
unsigned int matchinfosize)
{
return;
}
static struct ip6t_match ip6t_ipv6header_match = {
.name = "ipv6header",
.match = &ipv6header_match,
.checkentry = &ipv6header_checkentry,
.destroy = &ipv6header_destroy,
.destroy = NULL,
.me = THIS_MODULE,
};
......
......@@ -141,9 +141,9 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
* socket values.
*/
asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
asoc->rto_initial = sp->rtoinfo.srto_initial * HZ / 1000;
asoc->rto_max = sp->rtoinfo.srto_max * HZ / 1000;
asoc->rto_min = sp->rtoinfo.srto_min * HZ / 1000;
asoc->rto_initial = MSECS_TO_JIFFIES(sp->rtoinfo.srto_initial);
asoc->rto_max = MSECS_TO_JIFFIES(sp->rtoinfo.srto_max);
asoc->rto_min = MSECS_TO_JIFFIES(sp->rtoinfo.srto_min);
asoc->overall_error_count = 0;
......@@ -168,7 +168,8 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
asoc->max_init_timeo = sp->initmsg.sinit_max_init_timeo * HZ / 1000;
asoc->max_init_timeo =
MSECS_TO_JIFFIES(sp->initmsg.sinit_max_init_timeo);
/* Allocate storage for the ssnmap after the inbound and outbound
* streams have been negotiated during Init.
......@@ -246,6 +247,11 @@ struct sctp_association *sctp_association_init(struct sctp_association *asoc,
*/
asoc->peer.sack_needed = 1;
/* Assume that the peer recognizes ASCONF until reported otherwise
* via an ERROR chunk.
*/
asoc->peer.asconf_capable = 1;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
sctp_inq_set_th_handler(&asoc->base.inqueue,
......@@ -495,7 +501,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* Initialize the peer's heartbeat interval based on the
* sock configured value.
*/
peer->hb_interval = sp->paddrparam.spp_hbinterval * HZ;
peer->hb_interval = MSECS_TO_JIFFIES(sp->paddrparam.spp_hbinterval);
/* Set the path max_retrans. */
peer->max_retrans = asoc->max_retrans;
......
......@@ -324,6 +324,43 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
return 0;
}
/* Find the first address in the bind address list that is not present in
* the addrs packed array.
*/
union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
const union sctp_addr *addrs,
int addrcnt,
struct sctp_opt *opt)
{
struct sctp_sockaddr_entry *laddr;
union sctp_addr *addr;
void *addr_buf;
struct sctp_af *af;
struct list_head *pos;
int i;
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
addr_buf = (union sctp_addr *)addrs;
for (i = 0; i < addrcnt; i++) {
addr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af)
return NULL;
if (opt->pf->cmp_addr(&laddr->a, addr, opt))
break;
addr_buf += af->sockaddr_len;
}
if (i == addrcnt)
return &laddr->a;
}
return NULL;
}
/* Copy out addresses from the global local address list. */
static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
union sctp_addr *addr,
......
......@@ -129,7 +129,7 @@ struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
SCTP_DEFAULT_TIMEOUT_T1_INIT;
ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =
sp->rtoinfo.srto_initial * HZ / 1000;
MSECS_TO_JIFFIES(sp->rtoinfo.srto_initial);
ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
ep->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
......@@ -138,7 +138,7 @@ struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
* recommended value of 5 times 'RTO.Max'.
*/
ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
= 5 * (sp->rtoinfo.srto_max * HZ / 1000);
= 5 * MSECS_TO_JIFFIES(sp->rtoinfo.srto_max);
ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] =
SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
......
/* SCTP kernel reference Implementation
* Copyright (C) IBM Corp. 2001, 2003
* (C) Copyright IBM Corp. 2001, 2003
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 Intel Corp.
......@@ -1288,7 +1288,7 @@ sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
/* Set an expiration time for the cookie. */
do_gettimeofday(&cookie->c.expiration);
tv_add(&asoc->cookie_life, &cookie->c.expiration);
TIMEVAL_ADD(asoc->cookie_life, cookie->c.expiration);
/* Copy the peer's init packet. */
memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
......@@ -2021,11 +2021,11 @@ struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
sctp_addiphdr_t asconf;
struct sctp_chunk *retval;
int length = sizeof(asconf) + vparam_len;
union sctp_params addrparam;
union sctp_addr_param addrparam;
int addrlen;
struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family);
addrlen = af->to_addr_param(addr, (union sctp_addr_param *)&addrparam);
addrlen = af->to_addr_param(addr, &addrparam);
if (!addrlen)
return NULL;
length += addrlen;
......@@ -2045,6 +2045,83 @@ struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
return retval;
}
/* ADDIP
* 3.2.1 Add IP Address
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Type = 0xC001 | Length = Variable |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ASCONF-Request Correlation ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Address Parameter |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 3.2.2 Delete IP Address
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Type = 0xC002 | Length = Variable |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ASCONF-Request Correlation ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Address Parameter |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
*/
struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
union sctp_addr *laddr,
struct sockaddr *addrs,
int addrcnt,
int flags)
{
sctp_addip_param_t param;
struct sctp_chunk *retval;
union sctp_addr_param addr_param;
union sctp_addr *addr;
void *addr_buf;
struct sctp_af *af;
int paramlen = sizeof(param);
int addr_param_len = 0;
int totallen = 0;
int i;
/* Get total length of all the address parameters. */
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
addr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
addr_param_len = af->to_addr_param(addr, &addr_param);
totallen += paramlen;
totallen += addr_param_len;
addr_buf += af->sockaddr_len;
}
/* Create an asconf chunk with the required length. */
retval = sctp_make_asconf(asoc, laddr, totallen);
if (!retval)
return NULL;
/* Add the address parameters to the asconf chunk. */
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
addr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
addr_param_len = af->to_addr_param(addr, &addr_param);
param.param_hdr.type = flags;
param.param_hdr.length = htons(paramlen + addr_param_len);
param.crr_id = htonl(i);
sctp_addto_chunk(retval, paramlen, &param);
sctp_addto_chunk(retval, addr_param_len, &addr_param);
addr_buf += af->sockaddr_len;
}
return retval;
}
/* ADDIP
* 3.2.4 Set Primary IP Address
* 0 1 2 3
......@@ -2065,11 +2142,11 @@ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
sctp_addip_param_t param;
struct sctp_chunk *retval;
int len = sizeof(param);
union sctp_params addrparam;
union sctp_addr_param addrparam;
int addrlen;
struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family);
addrlen = af->to_addr_param(addr, (union sctp_addr_param *)&addrparam);
addrlen = af->to_addr_param(addr, &addrparam);
if (!addrlen)
return NULL;
len += addrlen;
......@@ -2089,11 +2166,7 @@ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
return retval;
}
/*
* Unpack the parameters in an ASCONF chunk into an association and
* generate ASCONF-ACK chunk.
*
* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK)
/* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK)
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
......@@ -2110,8 +2183,28 @@ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
* | ASCONF Parameter Response#N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* All the parameter respoinces will be added in this function.
* Create an ASCONF_ACK chunk with enough space for the parameter responses.
*/
struct sctp_chunk *sctp_make_asconf_ack(struct sctp_association *asoc,
int serial, int vparam_len)
{
sctp_addiphdr_t asconf;
struct sctp_chunk *retval;
int length = sizeof(asconf) + vparam_len;
/* Create the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF_ACK, 0, length);
if (!retval)
return NULL;
asconf.serial = serial;
retval->subh.addip_hdr =
sctp_addto_chunk(retval, sizeof(asconf), &asconf);
return retval;
}
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf,
int vparam_len)
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2003
* Copyright (c) 1999 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines Corp.
*
* This file is part of the SCTP kernel reference Implementation
*
......@@ -690,6 +690,44 @@ static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
chunk->transport = t;
}
/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
struct sctp_operr_chunk *operr_chunk;
struct sctp_errhdr *err_hdr;
operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr;
err_hdr = &operr_chunk->err_hdr;
switch (err_hdr->cause) {
case SCTP_ERROR_UNKNOWN_CHUNK:
{
struct sctp_chunkhdr *unk_chunk_hdr;
unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable;
switch (unk_chunk_hdr->type) {
/* ADDIP 4.1 A9) If the peer responds to an ASCONF with an
* ERROR chunk reporting that it did not recognize the ASCONF
* chunk type, the sender of the ASCONF MUST NOT send any
* further ASCONF chunks and MUST stop its T-4 timer.
*/
case SCTP_CID_ASCONF:
asoc->peer.asconf_capable = 0;
sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
break;
default:
break;
}
break;
}
default:
break;
}
}
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
......@@ -1205,6 +1243,9 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr);
break;
case SCTP_CMD_PROCESS_OPERR:
sctp_cmd_process_operr(commands, asoc, chunk);
break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2003
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 International Business Machines, Corp.
* Copyright (c) 2001-2002 Intel Corp.
* Copyright (c) 2002 Nokia Corp.
*
......@@ -2864,6 +2864,9 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
sctp_ulpevent_free(ev);
goto nomem;
}
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
SCTP_CHUNK(chunk));
}
return SCTP_DISPOSITION_CONSUME;
......
......@@ -97,6 +97,8 @@ static void sctp_wait_for_close(struct sock *sk, long timeo);
static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
......@@ -349,6 +351,106 @@ int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
return retval;
}
/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
* associations that are part of the endpoint indicating that a list of local
* addresses are added to the endpoint.
*
* If any of the addresses is already in the bind address list of the
* association, we do not send the chunk for that association. But it will not
* affect other associations.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_send_asconf_add_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
struct sctp_opt *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
struct sctp_sockaddr_entry *laddr;
union sctp_addr *addr;
void *addr_buf;
struct sctp_af *af;
struct list_head *pos;
struct list_head *p;
int i;
int retval = 0;
sp = sctp_sk(sk);
ep = sp->ep;
SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
__FUNCTION__, sk, addrs, addrcnt);
list_for_each(pos, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
if (!sctp_state(asoc, ESTABLISHED))
continue;
if (!asoc->peer.asconf_capable)
continue;
/* Check if any address in the packed array of addresses is
* in the bind address list of the association. If so,
* do not send the asconf chunk to its peer, but continue with
* other associations.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
addr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af) {
retval = -EINVAL;
goto out;
}
if (sctp_assoc_lookup_laddr(asoc, addr))
break;
addr_buf += af->sockaddr_len;
}
if (i < addrcnt)
continue;
/* Use the first address in bind addr list of association as
* Address Parameter of ASCONF CHUNK.
*/
sctp_read_lock(&asoc->base.addr_lock);
bp = &asoc->base.bind_addr;
p = bp->address_list.next;
laddr = list_entry(p, struct sctp_sockaddr_entry, list);
sctp_read_unlock(&asoc->base.addr_lock);
chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
addrcnt, SCTP_PARAM_ADD_IP);
if (!chunk) {
retval = -ENOMEM;
goto out;
}
retval = sctp_primitive_ASCONF(asoc, chunk);
if (retval) {
sctp_chunk_free(chunk);
goto out;
}
/* FIXME: After sending the add address ASCONF chunk, we
* cannot append the address to the association's binding
* address list, because the new address may be used as the
* source of a message sent to the peer before the ASCONF
* chunk is received by the peer. So we should wait until
* ASCONF_ACK is received.
*/
}
out:
return retval;
}
/* Remove a list of addresses from bind addresses list. Do not remove the
* last address.
*
......@@ -436,6 +538,106 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
return retval;
}
/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
* the associations that are part of the endpoint indicating that a list of
* local addresses are removed from the endpoint.
*
* If any of the addresses is already in the bind address list of the
* association, we do not send the chunk for that association. But it will not
* affect other associations.
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
static int sctp_send_asconf_del_ip(struct sock *sk,
struct sockaddr *addrs,
int addrcnt)
{
struct sctp_opt *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
union sctp_addr *laddr;
void *addr_buf;
struct sctp_af *af;
struct list_head *pos;
int i;
int retval = 0;
sp = sctp_sk(sk);
ep = sp->ep;
SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
__FUNCTION__, sk, addrs, addrcnt);
list_for_each(pos, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
if (!sctp_state(asoc, ESTABLISHED))
continue;
if (!asoc->peer.asconf_capable)
continue;
/* Check if any address in the packed array of addresses is
* not present in the bind address list of the association.
* If so, do not send the asconf chunk to its peer, but
* continue with other associations.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
laddr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
if (!af) {
retval = -EINVAL;
goto out;
}
if (!sctp_assoc_lookup_laddr(asoc, laddr))
break;
addr_buf += af->sockaddr_len;
}
if (i < addrcnt)
continue;
/* Find one address in the association's bind address list
* that is not in the packed array of addresses. This is to
* make sure that we do not delete all the addresses in the
* association.
*/
sctp_read_lock(&asoc->base.addr_lock);
bp = &asoc->base.bind_addr;
laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
addrcnt, sp);
sctp_read_unlock(&asoc->base.addr_lock);
if (!laddr)
continue;
chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
SCTP_PARAM_DEL_IP);
if (!chunk) {
retval = -ENOMEM;
goto out;
}
retval = sctp_primitive_ASCONF(asoc, chunk);
if (retval) {
sctp_chunk_free(chunk);
goto out;
}
/* FIXME: After sending the delete address ASCONF chunk, we
* cannot remove the addresses from the association's bind
* address list, because there may still be packets sent to
* the deleted addresses, so we should wait until the ASCONF_ACK
* packet is received.
*/
}
out:
return retval;
}
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
*
* API 8.1
......@@ -564,10 +766,16 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, struct sockaddr *addrs,
switch (op) {
case SCTP_BINDX_ADD_ADDR:
err = sctp_bindx_add(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
break;
case SCTP_BINDX_REM_ADDR:
err = sctp_bindx_rem(sk, kaddrs, addrcnt);
if (err)
goto out;
err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
break;
default:
......@@ -575,6 +783,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, struct sockaddr *addrs,
break;
};
out:
kfree(kaddrs);
return err;
......@@ -962,8 +1171,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
= sinit->sinit_max_attempts;
}
if (sinit->sinit_max_init_timeo) {
asoc->max_init_timeo
= sinit->sinit_max_init_timeo * HZ;
asoc->max_init_timeo =
MSECS_TO_JIFFIES(sinit->sinit_max_init_timeo);
}
}
......@@ -1401,7 +1610,8 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
*/
if (params.spp_hbinterval) {
trans->hb_allowed = 1;
trans->hb_interval = params.spp_hbinterval * HZ / 1000;
trans->hb_interval =
MSECS_TO_JIFFIES(params.spp_hbinterval);
} else
trans->hb_allowed = 0;
}
......@@ -1560,11 +1770,12 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char *optval, int optlen) {
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial = rtoinfo.srto_initial * HZ / 1000;
asoc->rto_initial =
MSECS_TO_JIFFIES(rtoinfo.srto_initial);
if (rtoinfo.srto_max != 0)
asoc->rto_max = rtoinfo.srto_max * HZ / 1000;
asoc->rto_max = MSECS_TO_JIFFIES(rtoinfo.srto_max);
if (rtoinfo.srto_min != 0)
asoc->rto_min = rtoinfo.srto_min * HZ / 1000;
asoc->rto_min = MSECS_TO_JIFFIES(rtoinfo.srto_min);
} else {
/* If there is no association or the association-id = 0
* set the values to the endpoint.
......@@ -2088,14 +2299,14 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
sp->initmsg.sinit_max_instreams = sctp_max_instreams;
sp->initmsg.sinit_max_attempts = sctp_max_retrans_init;
sp->initmsg.sinit_max_init_timeo = (sctp_rto_max / HZ) * 1000;
sp->initmsg.sinit_max_init_timeo = JIFFIES_TO_MSECS(sctp_rto_max);
/* Initialize default RTO related parameters. These parameters can
* be modified for with the SCTP_RTOINFO socket option.
*/
sp->rtoinfo.srto_initial = (sctp_rto_initial / HZ) * 1000;
sp->rtoinfo.srto_max = (sctp_rto_max / HZ) * 1000;
sp->rtoinfo.srto_min = (sctp_rto_min / HZ) * 1000;
sp->rtoinfo.srto_initial = JIFFIES_TO_MSECS(sctp_rto_initial);
sp->rtoinfo.srto_max = JIFFIES_TO_MSECS(sctp_rto_max);
sp->rtoinfo.srto_min = JIFFIES_TO_MSECS(sctp_rto_min);
/* Initialize default association related parameters. These parameters
* can be modified with the SCTP_ASSOCINFO socket option.
......@@ -2104,8 +2315,8 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->assocparams.sasoc_number_peer_destinations = 0;
sp->assocparams.sasoc_peer_rwnd = 0;
sp->assocparams.sasoc_local_rwnd = 0;
sp->assocparams.sasoc_cookie_life = (sctp_valid_cookie_life / HZ)
* 1000;
sp->assocparams.sasoc_cookie_life =
JIFFIES_TO_MSECS(sctp_valid_cookie_life);
/* Initialize default event subscriptions. By default, all the
* options are off.
......@@ -2115,7 +2326,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
/* Default Peer Address Parameters. These defaults can
* be modified via SCTP_PEER_ADDR_PARAMS
*/
sp->paddrparam.spp_hbinterval = (sctp_hb_interval / HZ) * 1000;
sp->paddrparam.spp_hbinterval = JIFFIES_TO_MSECS(sctp_hb_interval);
sp->paddrparam.spp_pathmaxrxt = sctp_max_retrans_path;
/* If enabled no SCTP message fragmentation will be performed.
......@@ -2265,7 +2476,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
status.sstat_primary.spinfo_state = transport->active;
status.sstat_primary.spinfo_cwnd = transport->cwnd;
status.sstat_primary.spinfo_srtt = transport->srtt;
status.sstat_primary.spinfo_rto = (transport->rto / HZ) * 1000;
status.sstat_primary.spinfo_rto = JIFFIES_TO_MSECS(transport->rto);
status.sstat_primary.spinfo_mtu = transport->pmtu;
if (put_user(len, optlen)) {
......@@ -2320,7 +2531,7 @@ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
pinfo.spinfo_state = transport->active;
pinfo.spinfo_cwnd = transport->cwnd;
pinfo.spinfo_srtt = transport->srtt;
pinfo.spinfo_rto = (transport->rto / HZ) * 1000;
pinfo.spinfo_rto = JIFFIES_TO_MSECS(transport->rto);
pinfo.spinfo_mtu = transport->pmtu;
if (put_user(len, optlen)) {
......@@ -2524,7 +2735,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
if (!trans->hb_allowed)
params.spp_hbinterval = 0;
else
params.spp_hbinterval = trans->hb_interval * 1000 / HZ;
params.spp_hbinterval = JIFFIES_TO_MSECS(trans->hb_interval);
/* spp_pathmaxrxt contains the maximum number of retransmissions
* before this address shall be considered unreachable.
......@@ -2582,10 +2793,8 @@ static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len,
list_for_each(pos, &asoc->peer.transport_addr_list) {
cnt ++;
}
if (copy_to_user(optval, &cnt, sizeof(int)))
return -EFAULT;
return 0;
return cnt;
}
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
......@@ -2666,10 +2875,8 @@ static int sctp_getsockopt_local_addrs_num(struct sock *sk, int len,
list_for_each(pos, &bp->address_list) {
cnt ++;
}
if (copy_to_user(optval, &cnt, sizeof(int)))
return -EFAULT;
return 0;
return cnt;
}
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
......@@ -2879,9 +3086,9 @@ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, char *optval,
/* Values corresponding to the specific association. */
if (asoc) {
rtoinfo.srto_initial = (asoc->rto_initial / HZ) * 1000;
rtoinfo.srto_max = (asoc->rto_max / HZ) * 1000;
rtoinfo.srto_min = (asoc->rto_min / HZ) * 1000;
rtoinfo.srto_initial = JIFFIES_TO_MSECS(asoc->rto_initial);
rtoinfo.srto_max = JIFFIES_TO_MSECS(asoc->rto_max);
rtoinfo.srto_min = JIFFIES_TO_MSECS(asoc->rto_min);
} else {
/* Values corresponding to the endpoint. */
struct sctp_opt *sp = sctp_sk(sk);
......
......@@ -44,7 +44,7 @@
#include <linux/sysctl.h>
static ctl_handler sctp_sysctl_jiffies_ms;
static long rto_timer_min = 0;
static long rto_timer_min = 1;
static long rto_timer_max = 86400000; /* One day */
static ctl_table sctp_table[] = {
......