Commit 334f8b2a authored by David S. Miller
parents 7477fd2e ef1a5a50
......@@ -6,11 +6,13 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#endif
#include <linux/compiler.h>
......@@ -76,7 +78,6 @@ extern void netfilter_init(void);
#define NF_MAX_HOOKS 8
struct sk_buff;
struct net_device;
typedef unsigned int nf_hookfn(unsigned int hooknum,
struct sk_buff *skb,
......@@ -233,6 +234,11 @@ struct nf_afinfo {
unsigned short family;
__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
__sum16 (*checksum_partial)(struct sk_buff *skb,
unsigned int hook,
unsigned int dataoff,
unsigned int len,
u_int8_t protocol);
int (*route)(struct dst_entry **dst, struct flowi *fl);
void (*saveroute)(const struct sk_buff *skb,
struct nf_queue_entry *entry);
......@@ -262,6 +268,23 @@ nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
return csum;
}
static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol, unsigned short family)
{
const struct nf_afinfo *afinfo;
__sum16 csum = 0;
rcu_read_lock();
afinfo = nf_get_afinfo(family);
if (afinfo)
csum = afinfo->checksum_partial(skb, hook, dataoff, len,
protocol);
rcu_read_unlock();
return csum;
}
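A minimal caller sketch (hypothetical, not part of this commit) showing how the new partial-checksum helper can be used; the function name, the NF_INET_PRE_ROUTING hook choice and the AF_INET family are illustrative assumptions:
static inline bool example_tcp_csum_ok(struct sk_buff *skb,
					unsigned int thoff, unsigned int len)
{
	/* a zero return from nf_checksum_partial() means the checksum over
	 * the first 'len' bytes past 'thoff' is acceptable */
	return nf_checksum_partial(skb, NF_INET_PRE_ROUTING, thoff, len,
				   IPPROTO_TCP, AF_INET) == 0;
}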
extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
......@@ -320,5 +343,56 @@ extern void (*nf_ct_destroy)(struct nf_conntrack *);
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
static inline struct net *nf_pre_routing_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_local_in_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_forward_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
BUG_ON(in->nd_net != out->nd_net);
return in->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_local_out_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return out->nd_net;
#else
return &init_net;
#endif
}
static inline struct net *nf_post_routing_net(const struct net_device *in,
const struct net_device *out)
{
#ifdef CONFIG_NET_NS
return out->nd_net;
#else
return &init_net;
#endif
}
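An illustrative hook sketch (hypothetical, not part of this commit): these helpers let a hook resolve the network namespace from the devices it is handed instead of hard-coding init_net, which is what the iptable_filter/mangle/raw hunks further down do:
static unsigned int example_pre_routing_hook(unsigned int hooknum,
					     struct sk_buff *skb,
					     const struct net_device *in,
					     const struct net_device *out,
					     int (*okfn)(struct sk_buff *))
{
	struct net *net = nf_pre_routing_net(in, out);

	/* per-namespace state (e.g. a table) would be looked up via 'net';
	 * with CONFIG_NET_NS=n this always resolves to &init_net */
	return net ? NF_ACCEPT : NF_DROP;
}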
#endif /*__KERNEL__*/
#endif /*__LINUX_NETFILTER_H*/
#ifndef _NF_CONNTRACK_DCCP_H
#define _NF_CONNTRACK_DCCP_H
/* Exposed to userspace over nfnetlink */
enum ct_dccp_states {
CT_DCCP_NONE,
CT_DCCP_REQUEST,
CT_DCCP_RESPOND,
CT_DCCP_PARTOPEN,
CT_DCCP_OPEN,
CT_DCCP_CLOSEREQ,
CT_DCCP_CLOSING,
CT_DCCP_TIMEWAIT,
CT_DCCP_IGNORE,
CT_DCCP_INVALID,
__CT_DCCP_MAX
};
#define CT_DCCP_MAX (__CT_DCCP_MAX - 1)
enum ct_dccp_roles {
CT_DCCP_ROLE_CLIENT,
CT_DCCP_ROLE_SERVER,
__CT_DCCP_ROLE_MAX
};
#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
#ifdef __KERNEL__
#include <net/netfilter/nf_conntrack_tuple.h>
struct nf_ct_dccp {
u_int8_t role[IP_CT_DIR_MAX];
u_int8_t state;
u_int8_t last_pkt;
u_int8_t last_dir;
u_int64_t handshake_seq;
};
#endif /* __KERNEL__ */
#endif /* _NF_CONNTRACK_DCCP_H */
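A hedged illustration (not part of this header): a tracker or /proc printer would typically index a state-name table by the enum above, sized with CT_DCCP_MAX; the array name is hypothetical:
static const char *const example_dccp_state_names[CT_DCCP_MAX + 1] = {
	[CT_DCCP_NONE]		= "NONE",
	[CT_DCCP_REQUEST]	= "REQUEST",
	[CT_DCCP_RESPOND]	= "RESPOND",
	[CT_DCCP_PARTOPEN]	= "PARTOPEN",
	[CT_DCCP_OPEN]		= "OPEN",
	[CT_DCCP_CLOSEREQ]	= "CLOSEREQ",
	[CT_DCCP_CLOSING]	= "CLOSING",
	[CT_DCCP_TIMEWAIT]	= "TIMEWAIT",
	[CT_DCCP_IGNORE]	= "IGNORE",
	[CT_DCCP_INVALID]	= "INVALID",
};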
......@@ -80,6 +80,7 @@ enum ctattr_l4proto {
enum ctattr_protoinfo {
CTA_PROTOINFO_UNSPEC,
CTA_PROTOINFO_TCP,
CTA_PROTOINFO_DCCP,
__CTA_PROTOINFO_MAX
};
#define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)
......@@ -95,6 +96,13 @@ enum ctattr_protoinfo_tcp {
};
#define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1)
enum ctattr_protoinfo_dccp {
CTA_PROTOINFO_DCCP_UNSPEC,
CTA_PROTOINFO_DCCP_STATE,
__CTA_PROTOINFO_DCCP_MAX,
};
#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
enum ctattr_counters {
CTA_COUNTERS_UNSPEC,
CTA_COUNTERS_PACKETS, /* old 64bit counters */
......
......@@ -430,13 +430,13 @@ extern int xt_compat_add_offset(int af, unsigned int offset, short delta);
extern void xt_compat_flush_offsets(int af);
extern short xt_compat_calc_jump(int af, unsigned int offset);
extern int xt_compat_match_offset(struct xt_match *match);
extern int xt_compat_match_offset(const struct xt_match *match);
extern int xt_compat_match_from_user(struct xt_entry_match *m,
void **dstptr, unsigned int *size);
extern int xt_compat_match_to_user(struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);
extern int xt_compat_target_offset(struct xt_target *target);
extern int xt_compat_target_offset(const struct xt_target *target);
extern void xt_compat_target_from_user(struct xt_entry_target *t,
void **dstptr, unsigned int *size);
extern int xt_compat_target_to_user(struct xt_entry_target *t,
......
......@@ -37,68 +37,54 @@ struct xt_sctp_info {
#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
chunkmap[type / bytes(u_int32_t)] |= \
(chunkmap)[type / bytes(u_int32_t)] |= \
1 << (type % bytes(u_int32_t)); \
} while (0)
#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
chunkmap[type / bytes(u_int32_t)] &= \
(chunkmap)[type / bytes(u_int32_t)] &= \
~(1 << (type % bytes(u_int32_t))); \
} while (0)
#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
(chunkmap[type / bytes (u_int32_t)] & \
((chunkmap)[type / bytes (u_int32_t)] & \
(1 << (type % bytes (u_int32_t)))) ? 1: 0; \
})
#define SCTP_CHUNKMAP_RESET(chunkmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
chunkmap[i] = 0; \
} while (0)
memset((chunkmap), 0, sizeof(chunkmap))
#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
chunkmap[i] = ~0; \
} while (0)
memset((chunkmap), ~0U, sizeof(chunkmap))
#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
do { \
int i; \
for (i = 0; i < ARRAY_SIZE(srcmap); i++) \
destmap[i] = srcmap[i]; \
} while (0)
memcpy((destmap), (srcmap), sizeof(srcmap))
#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
({ \
int i; \
int flag = 1; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
if (chunkmap[i]) { \
flag = 0; \
break; \
} \
} \
flag; \
})
__sctp_chunkmap_is_clear((chunkmap), ARRAY_SIZE(chunkmap))
static inline bool
__sctp_chunkmap_is_clear(const u_int32_t *chunkmap, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
if (chunkmap[i])
return false;
return true;
}
#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
({ \
int i; \
int flag = 1; \
for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
if (chunkmap[i] != ~0) { \
flag = 0; \
break; \
} \
} \
flag; \
})
__sctp_chunkmap_is_all_set((chunkmap), ARRAY_SIZE(chunkmap))
static inline bool
__sctp_chunkmap_is_all_set(const u_int32_t *chunkmap, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
if (chunkmap[i] != ~0U)
return false;
return true;
}
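A hypothetical usage sketch (not part of this header), assuming the usual chunkmap[] member of struct xt_sctp_info; SCTP chunk type 1 is INIT:
static inline bool example_matches_init_chunk(const struct xt_sctp_info *info)
{
	/* read-only test against the per-rule chunk bitmap */
	return SCTP_CHUNKMAP_IS_SET(info->chunkmap, 1 /* INIT */);
}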
#endif /* _XT_SCTP_H_ */
......@@ -23,8 +23,6 @@
#define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
#define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
#define arpt_target xt_target
#define arpt_table xt_table
#define ARPT_DEV_ADDR_LEN_MAX 16
......@@ -266,20 +264,15 @@ struct arpt_error
.target.errorname = "ERROR", \
}
#define arpt_register_target(tgt) \
({ (tgt)->family = NF_ARP; \
xt_register_target(tgt); })
#define arpt_unregister_target(tgt) xt_unregister_target(tgt)
extern struct arpt_table *arpt_register_table(struct net *net,
struct arpt_table *table,
extern struct xt_table *arpt_register_table(struct net *net,
struct xt_table *table,
const struct arpt_replace *repl);
extern void arpt_unregister_table(struct arpt_table *table);
extern void arpt_unregister_table(struct xt_table *table);
extern unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
struct arpt_table *table);
struct xt_table *table);
#define ARPT_ALIGN(s) XT_ALIGN(s)
......
#ifndef __LINUX_BRIDGE_EBT_NFLOG_H
#define __LINUX_BRIDGE_EBT_NFLOG_H
#define EBT_NFLOG_MASK 0x0
#define EBT_NFLOG_PREFIX_SIZE 64
#define EBT_NFLOG_WATCHER "nflog"
#define EBT_NFLOG_DEFAULT_GROUP 0x1
#define EBT_NFLOG_DEFAULT_THRESHOLD 1
struct ebt_nflog_info {
u_int32_t len;
u_int16_t group;
u_int16_t threshold;
u_int16_t flags;
u_int16_t pad;
char prefix[EBT_NFLOG_PREFIX_SIZE];
};
#endif /* __LINUX_BRIDGE_EBT_NFLOG_H */
......@@ -62,8 +62,6 @@ enum nf_ip_hook_priorities {
NF_IP_PRI_FILTER = 0,
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_CONNTRACK_HELPER = INT_MAX - 2,
NF_IP_PRI_NAT_SEQ_ADJUST = INT_MAX - 1,
NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
NF_IP_PRI_LAST = INT_MAX,
};
......
......@@ -20,6 +20,7 @@
#include <asm/atomic.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_dccp.h>
#include <linux/netfilter/nf_conntrack_sctp.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <net/netfilter/ipv4/nf_conntrack_icmp.h>
......@@ -30,6 +31,7 @@
/* per conntrack: protocol private data */
union nf_conntrack_proto {
/* insert conntrack proto private data here */
struct nf_ct_dccp dccp;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
struct ip_ct_icmp icmp;
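A hypothetical accessor (not part of this commit): with the dccp member added to the union above, per-connection DCCP state can be read straight off the conntrack entry:
static inline u_int8_t example_ct_dccp_state(const struct nf_conn *ct)
{
	return ct->proto.dccp.state;	/* one of enum ct_dccp_states */
}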
......@@ -63,14 +65,7 @@ union nf_conntrack_help {
#include <linux/timer.h>
#ifdef CONFIG_NETFILTER_DEBUG
#define NF_CT_ASSERT(x) \
do { \
if (!(x)) \
/* Wooah! I'm tripping my conntrack in a frenzy of \
netplay... */ \
printk("NF_CT_ASSERT: %s:%i(%s)\n", \
__FILE__, __LINE__, __FUNCTION__); \
} while(0)
#define NF_CT_ASSERT(x) WARN_ON(!(x))
#else
#define NF_CT_ASSERT(x)
#endif
......@@ -145,6 +140,16 @@ nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
tuplehash[hash->tuple.dst.dir]);
}
static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct)
{
return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
}
static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct)
{
return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
}
/* get master conntrack via master expectation */
#define master_ct(conntr) (conntr->master)
......@@ -189,11 +194,10 @@ extern void nf_conntrack_hash_insert(struct nf_conn *ct);
extern void nf_conntrack_flush(void);
extern int nf_ct_get_tuplepr(const struct sk_buff *skb,
unsigned int nhoff,
u_int16_t l3num,
extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
unsigned int nhoff, u_int16_t l3num,
struct nf_conntrack_tuple *tuple);
extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
extern bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
extern void __nf_ct_refresh_acct(struct nf_conn *ct,
......
......@@ -30,7 +30,7 @@ extern void nf_conntrack_cleanup(void);
extern int nf_conntrack_proto_init(void);
extern void nf_conntrack_proto_fini(void);
extern int
extern bool
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
unsigned int dataoff,
......@@ -40,7 +40,7 @@ nf_ct_get_tuple(const struct sk_buff *skb,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
extern int
extern bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_l3proto *l3proto,
......
......@@ -28,31 +28,20 @@ struct nf_conntrack_l3proto
* Try to fill in the third arg: nhoff is offset of l3 proto
* hdr. Return true if possible.
*/
int (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple);
/*
* Invert the per-proto part of the tuple: ie. turn xmit into reply.
* Some packets can't be inverted: return 0 in that case.
*/
int (*invert_tuple)(struct nf_conntrack_tuple *inverse,
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
/* Print out the per-protocol part of the tuple. */
int (*print_tuple)(struct seq_file *s,
const struct nf_conntrack_tuple *);
/* Returns verdict for packet, or -1 for invalid. */
int (*packet)(struct nf_conn *ct,
const struct sk_buff *skb,
enum ip_conntrack_info ctinfo);
/*
* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next.
*/
int (*new)(struct nf_conn *ct, const struct sk_buff *skb);
/*
* Called before tracking.
* *dataoff: offset of protocol header (TCP, UDP,...) in skb
......
......@@ -25,14 +25,13 @@ struct nf_conntrack_l4proto
/* Try to fill in the third arg: dataoff is offset past network protocol
hdr. Return true if possible. */
int (*pkt_to_tuple)(const struct sk_buff *skb,
unsigned int dataoff,
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple);
/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
* Some packets can't be inverted: return 0 in that case.
*/
int (*invert_tuple)(struct nf_conntrack_tuple *inverse,
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
/* Returns verdict for packet, or -1 for invalid. */
......@@ -45,7 +44,7 @@ struct nf_conntrack_l4proto
/* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next. */
int (*new)(struct nf_conn *ct, const struct sk_buff *skb,
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff);
/* Called when a conntrack entry is destroyed */
......
......@@ -39,6 +39,9 @@ union nf_conntrack_man_proto
struct {
__be16 id;
} icmp;
struct {
__be16 port;
} dccp;
struct {
__be16 port;
} sctp;
......@@ -77,6 +80,9 @@ struct nf_conntrack_tuple
struct {
u_int8_t type, code;
} icmp;
struct {
__be16 port;
} dccp;
struct {
__be16 port;
} sctp;
......@@ -145,8 +151,6 @@ static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t)
}
}
#define NF_CT_DUMP_TUPLE(tp) nf_ct_dump_tuple(tp)
/* If we're the first tuple, it's the original dir. */
#define NF_CT_DIRECTION(h) \
((enum ip_conntrack_dir)(h)->tuple.dst.dir)
......@@ -160,7 +164,7 @@ struct nf_conntrack_tuple_hash
#endif /* __KERNEL__ */
static inline int __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
static inline bool __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return (nf_inet_addr_cmp(&t1->src.u3, &t2->src.u3) &&
......@@ -168,7 +172,7 @@ static inline int __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
t1->src.l3num == t2->src.l3num);
}
static inline int __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
static inline bool __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return (nf_inet_addr_cmp(&t1->dst.u3, &t2->dst.u3) &&
......@@ -176,21 +180,23 @@ static inline int __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
t1->dst.protonum == t2->dst.protonum);
}
static inline int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
static inline bool nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return __nf_ct_tuple_src_equal(t1, t2) &&
__nf_ct_tuple_dst_equal(t1, t2);
}
static inline int nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1,
static inline bool
nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1,
const struct nf_conntrack_tuple_mask *m2)
{
return (nf_inet_addr_cmp(&m1->src.u3, &m2->src.u3) &&
m1->src.u.all == m2->src.u.all);
}
static inline int nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
static inline bool
nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2,
const struct nf_conntrack_tuple_mask *mask)
{
......@@ -199,20 +205,21 @@ static inline int nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) &
mask->src.u3.all[count])
return 0;
return false;
}
if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all)
return 0;
return false;
if (t1->src.l3num != t2->src.l3num ||
t1->dst.protonum != t2->dst.protonum)
return 0;
return false;
return 1;
return true;
}
static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
static inline bool
nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple_mask *mask)
{
......
......@@ -24,6 +24,9 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
extern int nf_nat_seq_adjust(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
/* Setup NAT on this expected conntrack so it follows master, but goes
* to port ct->master->saved_proto. */
......
......@@ -8,9 +8,6 @@ struct nf_nat_range;
struct nf_nat_protocol
{
/* Protocol name */
const char *name;
/* Protocol number. */
unsigned int protonum;
......@@ -18,13 +15,13 @@ struct nf_nat_protocol
/* Translate a packet to the target according to manip type.
Return true if succeeded. */
int (*manip_pkt)(struct sk_buff *skb,
bool (*manip_pkt)(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype);
/* Is the manipable part of the tuple between min and max incl? */
int (*in_range)(const struct nf_conntrack_tuple *tuple,
bool (*in_range)(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max);
......@@ -33,7 +30,7 @@ struct nf_nat_protocol
maniptype), to give a unique tuple in the given range if
possible; return false if not. Per-protocol part of tuple
is initialized to the incoming packet. */
int (*unique_tuple)(struct nf_conntrack_tuple *tuple,
bool (*unique_tuple)(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct);
......@@ -62,9 +59,20 @@ extern int init_protocols(void) __init;
extern void cleanup_protocols(void);
extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum);
extern int nf_nat_port_range_to_nlattr(struct sk_buff *skb,
extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max);
extern bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct,
u_int16_t *rover);
extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
const struct nf_nat_range *range);
extern int nf_nat_port_nlattr_to_range(struct nlattr *tb[],
extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range *range);
#endif /*_NF_NAT_PROTO_H*/
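A hypothetical wiring sketch (not part of this commit): a port-based NAT protocol module can point its in_range hook directly at nf_nat_proto_in_range (the signatures match) and wrap nf_nat_proto_unique_tuple with nothing more than a per-protocol port rover; the names below are illustrative:
static u_int16_t example_port_rover;

static bool example_unique_tuple(struct nf_conntrack_tuple *tuple,
				 const struct nf_nat_range *range,
				 enum nf_nat_manip_type maniptype,
				 const struct nf_conn *ct)
{
	/* only the rover stays per protocol; the search logic is shared */
	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
					 &example_port_rover);
}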
......@@ -14,7 +14,4 @@ extern int nf_nat_rule_find(struct sk_buff *skb,
extern unsigned int
alloc_null_binding(struct nf_conn *ct, unsigned int hooknum);
extern unsigned int
alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum);
#endif /* _NF_NAT_RULE_H */
......@@ -212,4 +212,18 @@ config BRIDGE_EBT_ULOG
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_NFLOG
tristate "ebt: nflog support"
depends on BRIDGE_NF_EBTABLES
help
This option enables the nflog watcher, which lets you log
messages from any rule in any ebtables table through the
netfilter logging API. The backend can be the old LOG target,
the old ULOG target or nfnetlink_log.
To compile it as a module, choose M here. If unsure, say N.
endmenu
......@@ -30,3 +30,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
# watchers
obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
obj-$(CONFIG_BRIDGE_EBT_NFLOG) += ebt_nflog.o
/*
* ebt_nflog
*
* Author:
* Peter Warasin <peter@endian.com>
*
* February, 2008
*
* Based on:
* xt_NFLOG.c, (C) 2006 by Patrick McHardy <kaber@trash.net>
* ebt_ulog.c, (C) 2004 by Bart De Schuymer <bdschuym@pandora.be>
*
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nflog.h>
#include <net/netfilter/nf_log.h>
static void ebt_nflog(const struct sk_buff *skb,
unsigned int hooknr,
const struct net_device *in,
const struct net_device *out,
const void *data, unsigned int datalen)
{
struct ebt_nflog_info *info = (struct ebt_nflog_info *)data;
struct nf_loginfo li;
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, "%s", info->prefix);
}
static int ebt_nflog_check(const char *tablename,
unsigned int hookmask,
const struct ebt_entry *e,
void *data, unsigned int datalen)
{
struct ebt_nflog_info *info = (struct ebt_nflog_info *)data;
if (datalen != EBT_ALIGN(sizeof(struct ebt_nflog_info)))
return -EINVAL;
if (info->flags & ~EBT_NFLOG_MASK)
return -EINVAL;
info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';
return 0;
}
static struct ebt_watcher nflog __read_mostly = {
.name = EBT_NFLOG_WATCHER,
.watcher = ebt_nflog,
.check = ebt_nflog_check,
.me = THIS_MODULE,
};
static int __init ebt_nflog_init(void)
{
return ebt_register_watcher(&nflog);
}
static void __exit ebt_nflog_fini(void)
{
ebt_unregister_watcher(&nflog);
}
module_init(ebt_nflog_init);
module_exit(ebt_nflog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Warasin <peter@endian.com>");
MODULE_DESCRIPTION("ebtables NFLOG netfilter logging module");
......@@ -46,7 +46,7 @@ static struct ebt_table broute_table =
.name = "broute",
.table = &initial_table,
.valid_hooks = 1 << NF_BR_BROUTING,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(broute_table.lock),
.check = check,
.me = THIS_MODULE,
};
......
......@@ -55,7 +55,7 @@ static struct ebt_table frame_filter =
.name = "filter",
.table = &initial_table,
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(frame_filter.lock),
.check = check,
.me = THIS_MODULE,
};
......
......@@ -55,7 +55,7 @@ static struct ebt_table frame_nat =
.name = "nat",
.table = &initial_table,
.valid_hooks = NAT_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(frame_nat.lock),
.check = check,
.me = THIS_MODULE,
};
......
......@@ -182,9 +182,31 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
}
return csum;
}
EXPORT_SYMBOL(nf_ip_checksum);
static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol)
{
const struct iphdr *iph = ip_hdr(skb);
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (len == skb->len - dataoff)
return nf_ip_checksum(skb, hook, dataoff, protocol);
/* fall through */
case CHECKSUM_NONE:
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
skb->len - dataoff, 0);
skb->ip_summed = CHECKSUM_NONE;
csum = __skb_checksum_complete_head(skb, dataoff + len);
if (!csum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return csum;
}
static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
{
return ip_route_output_key(&init_net, (struct rtable **)dst, fl);
......@@ -193,6 +215,7 @@ static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
static const struct nf_afinfo nf_ip_afinfo = {
.family = AF_INET,
.checksum = nf_ip_checksum,
.checksum_partial = nf_ip_checksum_partial,
.route = nf_ip_route,
.saveroute = nf_ip_saveroute,
.reroute = nf_ip_reroute,
......
......@@ -241,10 +241,25 @@ config NF_NAT_SNMP_BASIC
# <expr> '&&' <expr> (6)
#
# (6) Returns the result of min(/expr/, /expr/).
config NF_NAT_PROTO_DCCP
tristate
depends on NF_NAT && NF_CT_PROTO_DCCP
default NF_NAT && NF_CT_PROTO_DCCP
config NF_NAT_PROTO_GRE
tristate
depends on NF_NAT && NF_CT_PROTO_GRE
config NF_NAT_PROTO_UDPLITE
tristate
depends on NF_NAT && NF_CT_PROTO_UDPLITE
default NF_NAT && NF_CT_PROTO_UDPLITE
config NF_NAT_PROTO_SCTP
tristate
default NF_NAT && NF_CT_PROTO_SCTP
depends on NF_NAT && NF_CT_PROTO_SCTP
config NF_NAT_FTP
tristate
depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT
......
......@@ -10,7 +10,7 @@ nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o
endif
endif
nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o
# connection tracking
......@@ -29,7 +29,10 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
# generic IP tables
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
......
......@@ -59,7 +59,7 @@ do { \
#endif
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
char *hdr_addr, int len)
const char *hdr_addr, int len)
{
int i, ret;
......@@ -80,8 +80,8 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
const char *outdev,
const struct arpt_arp *arpinfo)
{
char *arpptr = (char *)(arphdr + 1);
char *src_devaddr, *tgt_devaddr;
const char *arpptr = (char *)(arphdr + 1);
const char *src_devaddr, *tgt_devaddr;
__be32 src_ipaddr, tgt_ipaddr;
int i, ret;
......@@ -222,16 +222,16 @@ unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
struct arpt_table *table)
struct xt_table *table)
{
static const char nulldevname[IFNAMSIZ];
unsigned int verdict = NF_DROP;
struct arphdr *arp;
const struct arphdr *arp;
bool hotdrop = false;
struct arpt_entry *e, *back;
const char *indev, *outdev;
void *table_base;
struct xt_table_info *private;
const struct xt_table_info *private;
if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
return NF_DROP;
......@@ -352,7 +352,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
e->counters.pcnt = pos;
for (;;) {
struct arpt_standard_target *t
const struct arpt_standard_target *t
= (void *)arpt_get_target(e);
int visited = e->comefrom & (1 << hook);
......@@ -437,7 +437,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
static inline int check_entry(struct arpt_entry *e, const char *name)
{
struct arpt_entry_target *t;
const struct arpt_entry_target *t;
if (!arp_checkentry(&e->arp)) {
duprintf("arp_tables: arp check failed %p %s.\n", e, name);
......@@ -457,7 +457,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
static inline int check_target(struct arpt_entry *e, const char *name)
{
struct arpt_entry_target *t;
struct arpt_target *target;
struct xt_target *target;
int ret;
t = arpt_get_target(e);
......@@ -480,7 +480,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
unsigned int *i)
{
struct arpt_entry_target *t;
struct arpt_target *target;
struct xt_target *target;
int ret;
ret = check_entry(e, name);
......@@ -706,11 +706,11 @@ static void get_counters(const struct xt_table_info *t,
}
}
static inline struct xt_counters *alloc_counters(struct arpt_table *table)
static inline struct xt_counters *alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
......@@ -731,7 +731,7 @@ static inline struct xt_counters *alloc_counters(struct arpt_table *table)
}
static int copy_entries_to_user(unsigned int total_size,
struct arpt_table *table,
struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;
......@@ -851,7 +851,7 @@ static int compat_table_info(const struct xt_table_info *info,
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
char name[ARPT_TABLE_MAXNAMELEN];
struct arpt_table *t;
struct xt_table *t;
int ret;
if (*len != sizeof(struct arpt_getinfo)) {
......@@ -872,7 +872,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
"arptable_%s", name);
if (t && !IS_ERR(t)) {
struct arpt_getinfo info;
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
if (compat) {
......@@ -911,7 +911,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
{
int ret;
struct arpt_get_entries get;
struct arpt_table *t;
struct xt_table *t;
if (*len < sizeof(get)) {
duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
......@@ -927,7 +927,8 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
t = xt_find_table_lock(net, NF_ARP, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
duprintf("t->private->number = %u\n",
private->number);
if (get.size == private->size)
......@@ -936,7 +937,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
else {
duprintf("get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
module_put(t->me);
xt_table_unlock(t);
......@@ -953,7 +954,7 @@ static int __do_replace(struct net *net, const char *name,
void __user *counters_ptr)
{
int ret;
struct arpt_table *t;
struct xt_table *t;
struct xt_table_info *oldinfo;
struct xt_counters *counters;
void *loc_cpu_old_entry;
......@@ -1087,11 +1088,11 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
struct xt_counters_info tmp;
struct xt_counters *paddc;
unsigned int num_counters;
char *name;
const char *name;
int size;
void *ptmp;
struct arpt_table *t;
struct xt_table_info *private;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
......@@ -1554,11 +1555,11 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
}
static int compat_copy_entries_to_user(unsigned int total_size,
struct arpt_table *table,
struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
......@@ -1592,7 +1593,7 @@ static int compat_get_entries(struct net *net,
{
int ret;
struct compat_arpt_get_entries get;
struct arpt_table *t;
struct xt_table *t;
if (*len < sizeof(get)) {
duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
......@@ -1609,7 +1610,7 @@ static int compat_get_entries(struct net *net,
xt_compat_lock(NF_ARP);
t = xt_find_table_lock(net, NF_ARP, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
struct xt_table_info info;
duprintf("t->private->number = %u\n", private->number);
......@@ -1620,7 +1621,7 @@ static int compat_get_entries(struct net *net,
} else if (!ret) {
duprintf("compat_get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
xt_compat_flush_offsets(NF_ARP);
module_put(t->me);
......@@ -1722,8 +1723,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
return ret;
}
struct arpt_table *arpt_register_table(struct net *net,
struct arpt_table *table,
struct xt_table *arpt_register_table(struct net *net, struct xt_table *table,
const struct arpt_replace *repl)
{
int ret;
......@@ -1766,7 +1766,7 @@ struct arpt_table *arpt_register_table(struct net *net,
return ERR_PTR(ret);
}
void arpt_unregister_table(struct arpt_table *table)
void arpt_unregister_table(struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
......@@ -1784,7 +1784,7 @@ void arpt_unregister_table(struct arpt_table *table)
}
/* The built-in targets: standard (NULL) and error. */
static struct arpt_target arpt_standard_target __read_mostly = {
static struct xt_target arpt_standard_target __read_mostly = {
.name = ARPT_STANDARD_TARGET,
.targetsize = sizeof(int),
.family = NF_ARP,
......@@ -1795,7 +1795,7 @@ static struct arpt_target arpt_standard_target __read_mostly = {
#endif
};
static struct arpt_target arpt_error_target __read_mostly = {
static struct xt_target arpt_error_target __read_mostly = {
.name = ARPT_ERROR_TARGET,
.target = arpt_error,
.targetsize = ARPT_FUNCTION_MAXNAMELEN,
......
......@@ -15,7 +15,7 @@ target(struct sk_buff *skb,
const void *targinfo)
{
const struct arpt_mangle *mangle = targinfo;
struct arphdr *arp;
const struct arphdr *arp;
unsigned char *arpptr;
int pln, hln;
......@@ -73,8 +73,9 @@ checkentry(const char *tablename, const void *e, const struct xt_target *target,
return true;
}
static struct arpt_target arpt_mangle_reg __read_mostly = {
static struct xt_target arpt_mangle_reg __read_mostly = {
.name = "mangle",
.family = NF_ARP,
.target = target,
.targetsize = sizeof(struct arpt_mangle),
.checkentry = checkentry,
......@@ -83,15 +84,12 @@ static struct arpt_target arpt_mangle_reg __read_mostly = {
static int __init arpt_mangle_init(void)
{
if (arpt_register_target(&arpt_mangle_reg))
return -EINVAL;
return 0;
return xt_register_target(&arpt_mangle_reg);
}
static void __exit arpt_mangle_fini(void)
{
arpt_unregister_target(&arpt_mangle_reg);
xt_unregister_target(&arpt_mangle_reg);
}
module_init(arpt_mangle_init);
......
......@@ -45,10 +45,10 @@ static struct
.term = ARPT_ERROR_INIT,
};
static struct arpt_table packet_filter = {
static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
.private = NULL,
.me = THIS_MODULE,
.af = NF_ARP,
......@@ -70,18 +70,21 @@ static struct nf_hook_ops arpt_ops[] __read_mostly = {
.owner = THIS_MODULE,
.pf = NF_ARP,
.hooknum = NF_ARP_IN,
.priority = NF_IP_PRI_FILTER,
},
{
.hook = arpt_hook,
.owner = THIS_MODULE,
.pf = NF_ARP,
.hooknum = NF_ARP_OUT,
.priority = NF_IP_PRI_FILTER,
},
{
.hook = arpt_hook,
.owner = THIS_MODULE,
.pf = NF_ARP,
.hooknum = NF_ARP_FORWARD,
.priority = NF_IP_PRI_FILTER,
},
};
......
......@@ -296,7 +296,7 @@ static void trace_packet(struct sk_buff *skb,
struct ipt_entry *e)
{
void *table_base;
struct ipt_entry *root;
const struct ipt_entry *root;
char *hookname, *chainname, *comment;
unsigned int rulenum = 0;
......@@ -327,7 +327,7 @@ ipt_do_table(struct sk_buff *skb,
{
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
u_int16_t offset;
struct iphdr *ip;
const struct iphdr *ip;
u_int16_t datalen;
bool hotdrop = false;
/* Initializing verdict to NF_DROP keeps gcc happy. */
......@@ -926,7 +926,7 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
......@@ -953,9 +953,9 @@ copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
struct ipt_entry *e;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
int ret = 0;
void *loc_cpu_entry;
const void *loc_cpu_entry;
counters = alloc_counters(table);
if (IS_ERR(counters))
......@@ -975,8 +975,8 @@ copy_entries_to_user(unsigned int total_size,
/* ... then go back and fix counters and names */
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
unsigned int i;
struct ipt_entry_match *m;
struct ipt_entry_target *t;
const struct ipt_entry_match *m;
const struct ipt_entry_target *t;
e = (struct ipt_entry *)(loc_cpu_entry + off);
if (copy_to_user(userptr + off
......@@ -1116,7 +1116,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
"iptable_%s", name);
if (t && !IS_ERR(t)) {
struct ipt_getinfo info;
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
if (compat) {
......@@ -1172,7 +1172,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
t = xt_find_table_lock(net, AF_INET, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
duprintf("t->private->number = %u\n", private->number);
if (get.size == private->size)
ret = copy_entries_to_user(private->size,
......@@ -1180,7 +1180,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
else {
duprintf("get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
module_put(t->me);
xt_table_unlock(t);
......@@ -1337,11 +1337,11 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
struct xt_counters_info tmp;
struct xt_counters *paddc;
unsigned int num_counters;
char *name;
const char *name;
int size;
void *ptmp;
struct xt_table *t;
struct xt_table_info *private;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
......@@ -1878,11 +1878,11 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
void *loc_cpu_entry;
const void *loc_cpu_entry;
unsigned int i = 0;
counters = alloc_counters(table);
......@@ -1929,7 +1929,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
struct xt_table_info info;
duprintf("t->private->number = %u\n", private->number);
ret = compat_table_info(private, &info);
......@@ -1939,7 +1939,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
} else if (!ret) {
duprintf("compat_get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
xt_compat_flush_offsets(AF_INET);
module_put(t->me);
......@@ -2130,7 +2130,8 @@ icmp_match(const struct sk_buff *skb,
unsigned int protoff,
bool *hotdrop)
{
struct icmphdr _icmph, *ic;
const struct icmphdr *ic;
struct icmphdr _icmph;
const struct ipt_icmp *icmpinfo = matchinfo;
/* Must not be a fragment. */
......
......@@ -144,7 +144,7 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
}
static struct clusterip_config *
clusterip_config_init(struct ipt_clusterip_tgt_info *i, __be32 ip,
clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
struct net_device *dev)
{
struct clusterip_config *c;
......@@ -333,7 +333,7 @@ clusterip_tg(struct sk_buff *skb, const struct net_device *in,
}
#ifdef DEBUG
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
#endif
pr_debug("hash=%u ct_hash=%u ", hash, ct->mark);
if (!clusterip_responsible(cipinfo->config, hash)) {
......@@ -418,7 +418,7 @@ clusterip_tg_check(const char *tablename, const void *e_void,
/* drop reference count of cluster config when rule is deleted */
static void clusterip_tg_destroy(const struct xt_target *target, void *targinfo)
{
struct ipt_clusterip_tgt_info *cipinfo = targinfo;
const struct ipt_clusterip_tgt_info *cipinfo = targinfo;
/* if no more entries are referencing the config, remove it
* from the list and destroy the proc entry */
......@@ -567,7 +567,7 @@ struct clusterip_seq_position {
static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
{
struct proc_dir_entry *pde = s->private;
const struct proc_dir_entry *pde = s->private;
struct clusterip_config *c = pde->data;
unsigned int weight;
u_int32_t local_nodes;
......@@ -594,7 +594,7 @@ static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v;
struct clusterip_seq_position *idx = v;
*pos = ++idx->pos;
if (*pos >= idx->weight) {
......@@ -613,7 +613,7 @@ static void clusterip_seq_stop(struct seq_file *s, void *v)
static int clusterip_seq_show(struct seq_file *s, void *v)
{
struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v;
struct clusterip_seq_position *idx = v;
if (idx->pos != 0)
seq_putc(s, ',');
......@@ -669,7 +669,7 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
{
#define PROC_WRITELEN 10
char buffer[PROC_WRITELEN+1];
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
struct clusterip_config *c = pde->data;
unsigned long nodenum;
......
......@@ -100,7 +100,7 @@ ecn_tg_check(const char *tablename, const void *e_void,
const struct xt_target *target, void *targinfo,
unsigned int hook_mask)
{
const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo;
const struct ipt_ECN_info *einfo = targinfo;
const struct ipt_entry *e = e_void;
if (einfo->operation & IPT_ECN_OP_MASK) {
......
......@@ -76,7 +76,8 @@ static void dump_packet(const struct nf_loginfo *info,
if ((logflags & IPT_LOG_IPOPT)
&& ih->ihl * 4 > sizeof(struct iphdr)) {
unsigned char _opt[4 * 15 - sizeof(struct iphdr)], *op;
const unsigned char *op;
unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
unsigned int i, optsize;
optsize = ih->ihl * 4 - sizeof(struct iphdr);
......@@ -338,12 +339,16 @@ static void dump_packet(const struct nf_loginfo *info,
if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket && skb->sk->sk_socket->file)
printk("UID=%u GID=%u",
printk("UID=%u GID=%u ",
skb->sk->sk_socket->file->f_uid,
skb->sk->sk_socket->file->f_gid);
read_unlock_bh(&skb->sk->sk_callback_lock);
}
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!iphoff && skb->mark)
printk("MARK=0x%x ", skb->mark);
/* Proto Max log string length */
/* IP: 40+46+6+11+127 = 230 */
/* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
......
......@@ -35,8 +35,10 @@ MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv4");
static void send_reset(struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
struct iphdr *oiph, *niph;
struct tcphdr _otcph, *oth, *tcph;
const struct iphdr *oiph;
struct iphdr *niph;
const struct tcphdr *oth;
struct tcphdr _otcph, *tcph;
unsigned int addr_type;
/* IP header checks: fragment. */
......
......@@ -340,7 +340,7 @@ static void *recent_seq_start(struct seq_file *seq, loff_t *pos)
static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct recent_iter_state *st = seq->private;
struct recent_table *t = st->table;
const struct recent_table *t = st->table;
struct recent_entry *e = v;
struct list_head *head = e->list.next;
......@@ -361,7 +361,7 @@ static void recent_seq_stop(struct seq_file *s, void *v)
static int recent_seq_show(struct seq_file *seq, void *v)
{
struct recent_entry *e = v;
const struct recent_entry *e = v;
unsigned int i;
i = (e->index - 1) % ip_pkt_list_tot;
......@@ -396,7 +396,7 @@ static int recent_seq_open(struct inode *inode, struct file *file)
static ssize_t recent_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *loff)
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
struct recent_table *t = pde->data;
struct recent_entry *e;
char buf[sizeof("+255.255.255.255")], *c = buf;
......
......@@ -56,12 +56,23 @@ static struct
static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
/* The work comes in here from netfilter.c. */
static unsigned int
ipt_local_in_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out,
nf_local_in_net(in, out)->ipv4.iptable_filter);
}
static unsigned int
ipt_hook(unsigned int hook,
struct sk_buff *skb,
......@@ -69,7 +80,8 @@ ipt_hook(unsigned int hook,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_filter);
return ipt_do_table(skb, hook, in, out,
nf_forward_net(in, out)->ipv4.iptable_filter);
}
static unsigned int
......@@ -88,12 +100,13 @@ ipt_local_out_hook(unsigned int hook,
return NF_ACCEPT;
}
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_filter);
return ipt_do_table(skb, hook, in, out,
nf_local_out_net(in, out)->ipv4.iptable_filter);
}
static struct nf_hook_ops ipt_ops[] __read_mostly = {
{
.hook = ipt_hook,
.hook = ipt_local_in_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
......
......@@ -67,20 +67,54 @@ static struct
static struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
/* The work comes in here from netfilter.c. */
static unsigned int
ipt_route_hook(unsigned int hook,
ipt_pre_routing_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_mangle);
return ipt_do_table(skb, hook, in, out,
nf_pre_routing_net(in, out)->ipv4.iptable_mangle);
}
static unsigned int
ipt_post_routing_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out,
nf_post_routing_net(in, out)->ipv4.iptable_mangle);
}
static unsigned int
ipt_local_in_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out,
nf_local_in_net(in, out)->ipv4.iptable_mangle);
}
static unsigned int
ipt_forward_hook(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out,
nf_forward_net(in, out)->ipv4.iptable_mangle);
}
static unsigned int
......@@ -112,7 +146,8 @@ ipt_local_hook(unsigned int hook,
daddr = iph->daddr;
tos = iph->tos;
ret = ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_mangle);
ret = ipt_do_table(skb, hook, in, out,
nf_local_out_net(in, out)->ipv4.iptable_mangle);
/* Reroute for ANY change. */
if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
iph = ip_hdr(skb);
......@@ -130,21 +165,21 @@ ipt_local_hook(unsigned int hook,
static struct nf_hook_ops ipt_ops[] __read_mostly = {
{
.hook = ipt_route_hook,
.hook = ipt_pre_routing_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.hook = ipt_local_in_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.hook = ipt_forward_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_FORWARD,
......@@ -158,7 +193,7 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_route_hook,
.hook = ipt_post_routing_hook,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_POST_ROUTING,
......
......@@ -39,7 +39,7 @@ static struct
static struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
......@@ -52,7 +52,8 @@ ipt_hook(unsigned int hook,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_raw);
return ipt_do_table(skb, hook, in, out,
nf_pre_routing_net(in, out)->ipv4.iptable_raw);
}
static unsigned int
......@@ -70,7 +71,8 @@ ipt_local_hook(unsigned int hook,
"packet.\n");
return NF_ACCEPT;
}
return ipt_do_table(skb, hook, in, out, init_net.ipv4.iptable_raw);
return ipt_do_table(skb, hook, in, out,
nf_local_out_net(in, out)->ipv4.iptable_raw);
}
/* 'raw' is the very first table. */
......
......@@ -23,8 +23,14 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>
static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple)
{
const __be32 *ap;
......@@ -32,21 +38,21 @@ static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
sizeof(u_int32_t) * 2, _addrs);
if (ap == NULL)
return 0;
return false;
tuple->src.u3.ip = ap[0];
tuple->dst.u3.ip = ap[1];
return 1;
return true;
}
static int ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
tuple->src.u3.ip = orig->dst.u3.ip;
tuple->dst.u3.ip = orig->src.u3.ip;
return 1;
return true;
}
static int ipv4_print_tuple(struct seq_file *s,
......@@ -100,36 +106,42 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
}
static unsigned int ipv4_conntrack_help(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
const struct nf_conn_help *help;
const struct nf_conntrack_helper *helper;
unsigned int ret;
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
return NF_ACCEPT;
goto out;
help = nfct_help(ct);
if (!help)
return NF_ACCEPT;
goto out;
/* rcu_read_lock()ed by nf_hook_slow */
helper = rcu_dereference(help->helper);
if (!helper)
return NF_ACCEPT;
return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
goto out;
ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
ct, ctinfo);
if (ret != NF_ACCEPT)
return ret;
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
typeof(nf_nat_seq_adjust_hook) seq_adjust;
seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
if (!seq_adjust || !seq_adjust(skb, ct, ctinfo))
return NF_DROP;
}
out:
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
}
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
......@@ -210,20 +222,6 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_conntrack_help,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_HELPER,
},
{
.hook = ipv4_conntrack_help,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_HELPER,
},
{
.hook = ipv4_confirm,
.owner = THIS_MODULE,
......
......@@ -106,21 +106,16 @@ static int ct_seq_show(struct seq_file *s, void *v)
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
return 0;
if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num != AF_INET)
if (nf_ct_l3num(ct) != AF_INET)
return 0;
l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
.tuple.src.l3num);
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
NF_CT_ASSERT(l3proto);
l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL]
.tuple.src.l3num,
ct->tuplehash[IP_CT_DIR_ORIGINAL]
.tuple.dst.protonum);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
NF_CT_ASSERT(l4proto);
if (seq_printf(s, "%-8s %u %ld ",
l4proto->name,
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
l4proto->name, nf_ct_protonum(ct),
timer_pending(&ct->timeout)
? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
return -ENOSPC;
......
......@@ -22,8 +22,7 @@
static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
static int icmp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
const struct icmphdr *hp;
......@@ -31,13 +30,13 @@ static int icmp_pkt_to_tuple(const struct sk_buff *skb,
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
return 0;
return false;
tuple->dst.u.icmp.type = hp->type;
tuple->src.u.icmp.id = hp->un.echo.id;
tuple->dst.u.icmp.code = hp->code;
return 1;
return true;
}
/* Add 1; spaces filled with 0. */
......@@ -52,17 +51,17 @@ static const u_int8_t invmap[] = {
[ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
};
static int icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
if (orig->dst.u.icmp.type >= sizeof(invmap)
|| !invmap[orig->dst.u.icmp.type])
return 0;
return false;
tuple->src.u.icmp.id = orig->src.u.icmp.id;
tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
return 1;
return true;
}
/* Print out the per-protocol part of the tuple. */
......@@ -101,8 +100,8 @@ static int icmp_packet(struct nf_conn *ct,
}
/* Called when a new connection for this protocol found. */
static int icmp_new(struct nf_conn *ct,
const struct sk_buff *skb, unsigned int dataoff)
static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
{
static const u_int8_t valid_new[] = {
[ICMP_ECHO] = 1,
......@@ -116,11 +115,11 @@ static int icmp_new(struct nf_conn *ct,
/* Can't create a new ICMP `conn' with this. */
pr_debug("icmp: can't create new conn with type %u\n",
ct->tuplehash[0].tuple.dst.u.icmp.type);
NF_CT_DUMP_TUPLE(&ct->tuplehash[0].tuple);
return 0;
nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple);
return false;
}
atomic_set(&ct->proto.icmp.count, 0);
return 1;
return true;
}
/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
......
......@@ -150,9 +150,9 @@ find_appropriate_src(const struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range)
{
unsigned int h = hash_by_src(tuple);
struct nf_conn_nat *nat;
struct nf_conn *ct;
struct hlist_node *n;
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
const struct hlist_node *n;
rcu_read_lock();
hlist_for_each_entry_rcu(nat, n, &bysource[h], bysource) {
......@@ -349,7 +349,7 @@ nf_nat_setup_info(struct nf_conn *ct,
EXPORT_SYMBOL(nf_nat_setup_info);
/* Returns true if succeeded. */
static int
static bool
manip_pkt(u_int16_t proto,
struct sk_buff *skb,
unsigned int iphdroff,
......@@ -360,7 +360,7 @@ manip_pkt(u_int16_t proto,
const struct nf_nat_protocol *p;
if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
return 0;
return false;
iph = (void *)skb->data + iphdroff;
......@@ -369,7 +369,7 @@ manip_pkt(u_int16_t proto,
/* rcu_read_lock()ed by nf_hook_slow */
p = __nf_nat_proto_find(proto);
if (!p->manip_pkt(skb, iphdroff, target, maniptype))
return 0;
return false;
iph = (void *)skb->data + iphdroff;
......@@ -380,7 +380,7 @@ manip_pkt(u_int16_t proto,
csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
iph->daddr = target->dst.u3.ip;
}
return 1;
return true;
}
/* Do packet manipulations according to nf_nat_setup_info. */
......@@ -426,7 +426,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
struct icmphdr icmp;
struct iphdr ip;
} *inside;
struct nf_conntrack_l4proto *l4proto;
const struct nf_conntrack_l4proto *l4proto;
struct nf_conntrack_tuple inner, target;
int hdrlen = ip_hdrlen(skb);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
......@@ -544,46 +544,6 @@ void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int
nf_nat_port_range_to_nlattr(struct sk_buff *skb,
const struct nf_nat_range *range)
{
NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.tcp.port);
NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.tcp.port);
return 0;
nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_port_nlattr_to_range);
int
nf_nat_port_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range *range)
{
int ret = 0;
/* we have to return whether we actually parsed something or not */
if (tb[CTA_PROTONAT_PORT_MIN]) {
ret = 1;
range->min.tcp.port = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
}
if (!tb[CTA_PROTONAT_PORT_MAX]) {
if (ret)
range->max.tcp.port = range->min.tcp.port;
} else {
ret = 1;
range->max.tcp.port = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
}
return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nlattr);
#endif
/* No one is using conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
......@@ -660,6 +620,9 @@ static int __init nf_nat_init(void)
nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
BUG_ON(nf_nat_seq_adjust_hook != NULL);
rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
return 0;
cleanup_extend:
......@@ -686,6 +649,8 @@ static void __exit nf_nat_cleanup(void)
nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
nf_ct_l3proto_put(l3proto);
nf_ct_extend_unregister(&nat_extend);
rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
synchronize_net();
}
MODULE_LICENSE("GPL");
......
......@@ -416,7 +416,6 @@ nf_nat_seq_adjust(struct sk_buff *skb,
return 1;
}
EXPORT_SYMBOL(nf_nat_seq_adjust);
/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
......
......@@ -72,7 +72,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
}
pr_debug("trying to unexpect other dir: ");
NF_CT_DUMP_TUPLE(&t);
nf_ct_dump_tuple_ip(&t);
other_exp = nf_ct_expect_find_get(&t);
if (other_exp) {
nf_ct_unexpect_related(other_exp);
......
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/random.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>
bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 port;
if (maniptype == IP_NAT_MANIP_SRC)
port = tuple->src.u.all;
else
port = tuple->dst.u.all;
return ntohs(port) >= ntohs(min->all) &&
ntohs(port) <= ntohs(max->all);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct,
u_int16_t *rover)
{
unsigned int range_size, min, i;
__be16 *portptr;
u_int16_t off;
if (maniptype == IP_NAT_MANIP_SRC)
portptr = &tuple->src.u.all;
else
portptr = &tuple->dst.u.all;
/* If no range specified... */
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
/* If it's dst rewrite, can't change port */
if (maniptype == IP_NAT_MANIP_DST)
return false;
if (ntohs(*portptr) < 1024) {
/* Loose convention: >> 512 is credential passing */
if (ntohs(*portptr) < 512) {
min = 1;
range_size = 511 - min + 1;
} else {
min = 600;
range_size = 1023 - min + 1;
}
} else {
min = 1024;
range_size = 65535 - 1024 + 1;
}
} else {
min = ntohs(range->min.all);
range_size = ntohs(range->max.all) - min + 1;
}
off = *rover;
if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
off = net_random();
for (i = 0; i < range_size; i++, off++) {
*portptr = htons(min + off % range_size);
if (nf_nat_used_tuple(tuple, ct))
continue;
if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
*rover = off;
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
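/*
 * Worked example of the selection above (illustrative numbers only):
 * with no IP_NAT_RANGE_PROTO_SPECIFIED flag and an original source
 * port of 8080, the code picks min = 1024 and range_size = 64512 and
 * then tries htons(1024 + off % 64512), incrementing off on every
 * collision reported by nf_nat_used_tuple().  An original port below
 * 512 is remapped within 1..511 and one in 512..1023 within 600..1023,
 * preserving the convention that privileged ports map to privileged
 * ports.  The rover is only advanced when IP_NAT_RANGE_PROTO_RANDOM
 * is not set, so random mappings do not disturb the sequential ones.
 */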
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int nf_nat_proto_range_to_nlattr(struct sk_buff *skb,
const struct nf_nat_range *range)
{
NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MIN, range->min.all);
NLA_PUT_BE16(skb, CTA_PROTONAT_PORT_MAX, range->max.all);
return 0;
nla_put_failure:
return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_range_to_nlattr);
int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range *range)
{
if (tb[CTA_PROTONAT_PORT_MIN]) {
range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
range->max.all = range->min.all;
range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
}
if (tb[CTA_PROTONAT_PORT_MAX]) {
range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
}
return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
#endif
/*
* DCCP NAT protocol helper
*
* Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/dccp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
static u_int16_t dccp_port_rover;
static bool
dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&dccp_port_rover);
}
static bool
dccp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct iphdr *iph = (const void *)(skb->data + iphdroff);
struct dccp_hdr *hdr;
unsigned int hdroff = iphdroff + iph->ihl * 4;
__be32 oldip, newip;
__be16 *portptr, oldport, newport;
int hdrsize = 8; /* DCCP connection tracking guarantees this much */
if (skb->len >= hdroff + sizeof(struct dccp_hdr))
hdrsize = sizeof(struct dccp_hdr);
if (!skb_make_writable(skb, hdroff + hdrsize))
return false;
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct dccp_hdr *)(skb->data + hdroff);
if (maniptype == IP_NAT_MANIP_SRC) {
oldip = iph->saddr;
newip = tuple->src.u3.ip;
newport = tuple->src.u.dccp.port;
portptr = &hdr->dccph_sport;
} else {
oldip = iph->daddr;
newip = tuple->dst.u3.ip;
newport = tuple->dst.u.dccp.port;
portptr = &hdr->dccph_dport;
}
oldport = *portptr;
*portptr = newport;
if (hdrsize < sizeof(*hdr))
return true;
inet_proto_csum_replace4(&hdr->dccph_checksum, skb, oldip, newip, 1);
inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
0);
return true;
}
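/*
 * Note on the partial-header case above: DCCP connection tracking only
 * guarantees that the first 8 bytes of the header are present, and
 * those bytes already hold the source and destination ports, so the
 * port rewrite is safe even for a truncated header.  The checksum
 * update is only attempted when the full generic header fits in the
 * packet.
 */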
static const struct nf_nat_protocol nf_nat_protocol_dccp = {
.protonum = IPPROTO_DCCP,
.me = THIS_MODULE,
.manip_pkt = dccp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = dccp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
static int __init nf_nat_proto_dccp_init(void)
{
return nf_nat_protocol_register(&nf_nat_protocol_dccp);
}
static void __exit nf_nat_proto_dccp_fini(void)
{
nf_nat_protocol_unregister(&nf_nat_protocol_dccp);
}
module_init(nf_nat_proto_dccp_init);
module_exit(nf_nat_proto_dccp_fini);
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("DCCP NAT protocol helper");
MODULE_LICENSE("GPL");
......@@ -36,26 +36,8 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
/* is key in given range between min and max */
static int
gre_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 key;
if (maniptype == IP_NAT_MANIP_SRC)
key = tuple->src.u.gre.key;
else
key = tuple->dst.u.gre.key;
return ntohs(key) >= ntohs(min->gre.key) &&
ntohs(key) <= ntohs(max->gre.key);
}
/* generate unique tuple ... */
static int
static bool
gre_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
......@@ -68,7 +50,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
/* If there is no master conntrack we are not PPTP,
do not change tuples */
if (!ct->master)
return 0;
return false;
if (maniptype == IP_NAT_MANIP_SRC)
keyptr = &tuple->src.u.gre.key;
......@@ -89,20 +71,20 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
for (i = 0; i < range_size; i++, key++) {
*keyptr = htons(min + key % range_size);
if (!nf_nat_used_tuple(tuple, ct))
return 1;
return true;
}
pr_debug("%p: no NAT mapping\n", ct);
return 0;
return false;
}
/* manipulate a GRE packet according to maniptype */
static int
static bool
gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
struct gre_hdr *greh;
const struct gre_hdr *greh;
struct gre_hdr_pptp *pgreh;
const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
unsigned int hdroff = iphdroff + iph->ihl * 4;
......@@ -110,7 +92,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
/* pgreh includes two optional 32bit fields which are not required
* to be there. That's where the magic '8' comes from */
if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8))
return 0;
return false;
greh = (void *)skb->data + hdroff;
pgreh = (struct gre_hdr_pptp *)greh;
......@@ -118,7 +100,7 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
/* we only manipulate the destination of a packet, since the
* 'source key' is not present in the packet itself */
if (maniptype != IP_NAT_MANIP_DST)
return 1;
return true;
switch (greh->version) {
case GRE_VERSION_1701:
/* We do not currently NAT any GREv0 packets.
......@@ -130,21 +112,20 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
break;
default:
pr_debug("can't nat unknown GRE version\n");
return 0;
return false;
}
return 1;
return true;
}
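/*
 * Layout note for the "magic 8" above, assuming the RFC 2637 PPTP
 * framing: the enhanced GRE header ends with two optional 32-bit
 * fields (sequence and acknowledgment numbers), so sizeof(*pgreh) - 8
 * covers just the mandatory part (flags/version, protocol type,
 * payload length and call ID), which is all this function needs to
 * make writable.
 */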
static const struct nf_nat_protocol gre = {
.name = "GRE",
.protonum = IPPROTO_GRE,
.me = THIS_MODULE,
.manip_pkt = gre_manip_pkt,
.in_range = gre_in_range,
.in_range = nf_nat_proto_in_range,
.unique_tuple = gre_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_port_range_to_nlattr,
.nlattr_to_range = nf_nat_port_nlattr_to_range,
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
......
......@@ -17,7 +17,7 @@
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>
static int
static bool
icmp_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
......@@ -27,7 +27,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
}
static int
static bool
icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
......@@ -46,12 +46,12 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
(id % range_size));
if (!nf_nat_used_tuple(tuple, ct))
return 1;
return true;
}
return 0;
return false;
}
static int
static bool
icmp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
......@@ -62,24 +62,23 @@ icmp_manip_pkt(struct sk_buff *skb,
unsigned int hdroff = iphdroff + iph->ihl*4;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return 0;
return false;
hdr = (struct icmphdr *)(skb->data + hdroff);
inet_proto_csum_replace2(&hdr->checksum, skb,
hdr->un.echo.id, tuple->src.u.icmp.id, 0);
hdr->un.echo.id = tuple->src.u.icmp.id;
return 1;
return true;
}
const struct nf_nat_protocol nf_nat_protocol_icmp = {
.name = "ICMP",
.protonum = IPPROTO_ICMP,
.me = THIS_MODULE,
.manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_port_range_to_nlattr,
.nlattr_to_range = nf_nat_port_nlattr_to_range,
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
/*
* Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <net/sctp/checksum.h>
#include <net/netfilter/nf_nat_protocol.h>
static u_int16_t nf_sctp_port_rover;
static bool
sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&nf_sctp_port_rover);
}
static bool
sctp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
sctp_sctphdr_t *hdr;
unsigned int hdroff = iphdroff + iph->ihl*4;
__be32 oldip, newip;
u32 crc32;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct sctphdr *)(skb->data + hdroff);
if (maniptype == IP_NAT_MANIP_SRC) {
/* Get rid of src ip and src pt */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
hdr->source = tuple->src.u.sctp.port;
} else {
/* Get rid of dst ip and dst pt */
oldip = iph->daddr;
newip = tuple->dst.u3.ip;
hdr->dest = tuple->dst.u.sctp.port;
}
crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb),
crc32);
crc32 = sctp_end_cksum(crc32);
hdr->checksum = htonl(crc32);
return true;
}
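/*
 * Unlike TCP and UDP, the SCTP checksum is a CRC over the whole
 * packet (sctp_start_cksum/sctp_update_cksum/sctp_end_cksum), so the
 * incremental inet_proto_csum_replace*() helpers cannot be used; the
 * checksum is recomputed from scratch above, walking any fragments
 * hanging off frag_list as well.
 */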
static const struct nf_nat_protocol nf_nat_protocol_sctp = {
.protonum = IPPROTO_SCTP,
.me = THIS_MODULE,
.manip_pkt = sctp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = sctp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
static int __init nf_nat_proto_sctp_init(void)
{
return nf_nat_protocol_register(&nf_nat_protocol_sctp);
}
static void __exit nf_nat_proto_sctp_exit(void)
{
nf_nat_protocol_unregister(&nf_nat_protocol_sctp);
}
module_init(nf_nat_proto_sctp_init);
module_exit(nf_nat_proto_sctp_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCTP NAT protocol helper");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
......@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/ip.h>
#include <linux/tcp.h>
......@@ -19,75 +18,19 @@
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
static int
tcp_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 port;
if (maniptype == IP_NAT_MANIP_SRC)
port = tuple->src.u.tcp.port;
else
port = tuple->dst.u.tcp.port;
return ntohs(port) >= ntohs(min->tcp.port) &&
ntohs(port) <= ntohs(max->tcp.port);
}
static u_int16_t tcp_port_rover;
static int
static bool
tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
static u_int16_t port;
__be16 *portptr;
unsigned int range_size, min, i;
if (maniptype == IP_NAT_MANIP_SRC)
portptr = &tuple->src.u.tcp.port;
else
portptr = &tuple->dst.u.tcp.port;
/* If no range specified... */
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
/* If it's dst rewrite, can't change port */
if (maniptype == IP_NAT_MANIP_DST)
return 0;
/* Map privileged onto privileged. */
if (ntohs(*portptr) < 1024) {
/* Loose convention: >> 512 is credential passing */
if (ntohs(*portptr)<512) {
min = 1;
range_size = 511 - min + 1;
} else {
min = 600;
range_size = 1023 - min + 1;
}
} else {
min = 1024;
range_size = 65535 - 1024 + 1;
}
} else {
min = ntohs(range->min.tcp.port);
range_size = ntohs(range->max.tcp.port) - min + 1;
}
if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
port = net_random();
for (i = 0; i < range_size; i++, port++) {
*portptr = htons(min + port % range_size);
if (!nf_nat_used_tuple(tuple, ct))
return 1;
}
return 0;
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&tcp_port_rover);
}
static int
static bool
tcp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
......@@ -107,7 +50,7 @@ tcp_manip_pkt(struct sk_buff *skb,
hdrsize = sizeof(struct tcphdr);
if (!skb_make_writable(skb, hdroff + hdrsize))
return 0;
return false;
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct tcphdr *)(skb->data + hdroff);
......@@ -130,22 +73,21 @@ tcp_manip_pkt(struct sk_buff *skb,
*portptr = newport;
if (hdrsize < sizeof(*hdr))
return 1;
return true;
inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
return 1;
return true;
}
const struct nf_nat_protocol nf_nat_protocol_tcp = {
.name = "TCP",
.protonum = IPPROTO_TCP,
.me = THIS_MODULE,
.manip_pkt = tcp_manip_pkt,
.in_range = tcp_in_range,
.in_range = nf_nat_proto_in_range,
.unique_tuple = tcp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_port_range_to_nlattr,
.nlattr_to_range = nf_nat_port_nlattr_to_range,
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
......@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/ip.h>
#include <linux/udp.h>
......@@ -18,74 +17,19 @@
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>
static int
udp_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
__be16 port;
if (maniptype == IP_NAT_MANIP_SRC)
port = tuple->src.u.udp.port;
else
port = tuple->dst.u.udp.port;
return ntohs(port) >= ntohs(min->udp.port) &&
ntohs(port) <= ntohs(max->udp.port);
}
static u_int16_t udp_port_rover;
static int
static bool
udp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
static u_int16_t port;
__be16 *portptr;
unsigned int range_size, min, i;
if (maniptype == IP_NAT_MANIP_SRC)
portptr = &tuple->src.u.udp.port;
else
portptr = &tuple->dst.u.udp.port;
/* If no range specified... */
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
/* If it's dst rewrite, can't change port */
if (maniptype == IP_NAT_MANIP_DST)
return 0;
if (ntohs(*portptr) < 1024) {
/* Loose convention: >> 512 is credential passing */
if (ntohs(*portptr)<512) {
min = 1;
range_size = 511 - min + 1;
} else {
min = 600;
range_size = 1023 - min + 1;
}
} else {
min = 1024;
range_size = 65535 - 1024 + 1;
}
} else {
min = ntohs(range->min.udp.port);
range_size = ntohs(range->max.udp.port) - min + 1;
}
if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
port = net_random();
for (i = 0; i < range_size; i++, port++) {
*portptr = htons(min + port % range_size);
if (!nf_nat_used_tuple(tuple, ct))
return 1;
}
return 0;
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&udp_port_rover);
}
static int
static bool
udp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
......@@ -98,7 +42,7 @@ udp_manip_pkt(struct sk_buff *skb,
__be16 *portptr, newport;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return 0;
return false;
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct udphdr *)(skb->data + hdroff);
......@@ -124,18 +68,17 @@ udp_manip_pkt(struct sk_buff *skb,
hdr->check = CSUM_MANGLED_0;
}
*portptr = newport;
return 1;
return true;
}
const struct nf_nat_protocol nf_nat_protocol_udp = {
.name = "UDP",
.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
.manip_pkt = udp_manip_pkt,
.in_range = udp_in_range,
.in_range = nf_nat_proto_in_range,
.unique_tuple = udp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_port_range_to_nlattr,
.nlattr_to_range = nf_nat_port_nlattr_to_range,
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
static u_int16_t udplite_port_rover;
static bool
udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&udplite_port_rover);
}
static bool
udplite_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
struct udphdr *hdr;
unsigned int hdroff = iphdroff + iph->ihl*4;
__be32 oldip, newip;
__be16 *portptr, newport;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct udphdr *)(skb->data + hdroff);
if (maniptype == IP_NAT_MANIP_SRC) {
/* Get rid of src ip and src pt */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
newport = tuple->src.u.udp.port;
portptr = &hdr->source;
} else {
/* Get rid of dst ip and dst pt */
oldip = iph->daddr;
newip = tuple->dst.u3.ip;
newport = tuple->dst.u.udp.port;
portptr = &hdr->dest;
}
inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
if (!hdr->check)
hdr->check = CSUM_MANGLED_0;
*portptr = newport;
return true;
}
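/*
 * The CSUM_MANGLED_0 fold above: in one's-complement arithmetic
 * 0x0000 and 0xffff both encode zero, but a transmitted checksum of
 * zero is special (plain UDP uses it to mean "no checksum" and
 * UDP-Lite requires a real checksum), so a recomputed value of zero
 * is sent as 0xffff instead.
 */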
static const struct nf_nat_protocol nf_nat_protocol_udplite = {
.protonum = IPPROTO_UDPLITE,
.me = THIS_MODULE,
.manip_pkt = udplite_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = udplite_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
static int __init nf_nat_proto_udplite_init(void)
{
return nf_nat_protocol_register(&nf_nat_protocol_udplite);
}
static void __exit nf_nat_proto_udplite_fini(void)
{
nf_nat_protocol_unregister(&nf_nat_protocol_udplite);
}
module_init(nf_nat_proto_udplite_init);
module_exit(nf_nat_proto_udplite_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("UDP-Lite NAT protocol helper");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
......@@ -18,35 +18,34 @@
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>
static int unknown_in_range(const struct nf_conntrack_tuple *tuple,
static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type manip_type,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
return 1;
return true;
}
static int unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
/* Sorry: we can't help you; if it's not unique, we can't frob
anything. */
return 0;
return false;
}
static int
static bool
unknown_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
return 1;
return true;
}
const struct nf_nat_protocol nf_nat_unknown_protocol = {
.name = "unknown",
/* .me isn't set: getting a ref to this cannot fail. */
.manip_pkt = unknown_manip_pkt,
.in_range = unknown_in_range,
......
......@@ -61,7 +61,7 @@ static struct
static struct xt_table __nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
......@@ -143,7 +143,7 @@ static bool ipt_snat_checkentry(const char *tablename,
void *targinfo,
unsigned int hook_mask)
{
struct nf_nat_multi_range_compat *mr = targinfo;
const struct nf_nat_multi_range_compat *mr = targinfo;
/* Must be a valid range */
if (mr->rangesize != 1) {
......@@ -159,7 +159,7 @@ static bool ipt_dnat_checkentry(const char *tablename,
void *targinfo,
unsigned int hook_mask)
{
struct nf_nat_multi_range_compat *mr = targinfo;
const struct nf_nat_multi_range_compat *mr = targinfo;
/* Must be a valid range */
if (mr->rangesize != 1) {
......@@ -188,25 +188,6 @@ alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
}
unsigned int
alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum)
{
__be32 ip
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
: ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
__be16 all
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all
: ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all);
struct nf_nat_range range
= { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } };
pr_debug("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n",
ct, NIPQUAD(ip));
return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
}
int nf_nat_rule_find(struct sk_buff *skb,
unsigned int hooknum,
const struct net_device *in,
......
......@@ -220,7 +220,7 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx,
if (ch < 0x80)
*len = ch;
else {
cnt = (unsigned char) (ch & 0x7F);
cnt = ch & 0x7F;
*len = 0;
while (cnt > 0) {
......@@ -618,8 +618,7 @@ struct snmp_cnv
int syntax;
};
static struct snmp_cnv snmp_conv [] =
{
static const struct snmp_cnv snmp_conv[] = {
{ASN1_UNI, ASN1_NUL, SNMP_NULL},
{ASN1_UNI, ASN1_INT, SNMP_INTEGER},
{ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR},
......@@ -644,7 +643,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag,
unsigned int cls,
unsigned short *syntax)
{
struct snmp_cnv *cnv;
const struct snmp_cnv *cnv;
cnv = snmp_conv;
......@@ -904,7 +903,7 @@ static inline void mangle_address(unsigned char *begin,
u_int32_t old;
if (debug)
memcpy(&old, (unsigned char *)addr, sizeof(old));
memcpy(&old, addr, sizeof(old));
*addr = map->to;
......@@ -999,7 +998,7 @@ static unsigned char snmp_trap_decode(struct asn1_ctx *ctx,
*
*****************************************************************************/
static void hex_dump(unsigned char *buf, size_t len)
static void hex_dump(const unsigned char *buf, size_t len)
{
size_t i;
......@@ -1080,7 +1079,7 @@ static int snmp_parse_mangle(unsigned char *msg,
if (cls != ASN1_CTX || con != ASN1_CON)
return 0;
if (debug > 1) {
unsigned char *pdus[] = {
static const unsigned char *const pdus[] = {
[SNMP_PDU_GET] = "get",
[SNMP_PDU_NEXT] = "get-next",
[SNMP_PDU_RESPONSE] = "response",
......@@ -1232,8 +1231,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
{
int dir = CTINFO2DIR(ctinfo);
unsigned int ret;
struct iphdr *iph = ip_hdr(skb);
struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
const struct iphdr *iph = ip_hdr(skb);
const struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl);
/* SNMP replies and originating SNMP traps get mangled */
if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY)
......
......@@ -30,8 +30,8 @@
#ifdef CONFIG_XFRM
static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
struct nf_conn *ct;
struct nf_conntrack_tuple *t;
const struct nf_conn *ct;
const struct nf_conntrack_tuple *t;
enum ip_conntrack_info ctinfo;
enum ip_conntrack_dir dir;
unsigned long statusbit;
......@@ -50,7 +50,10 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
if (ct->status & statusbit) {
fl->fl4_dst = t->dst.u3.ip;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP)
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl->fl_ip_dport = t->dst.u.tcp.port;
}
......@@ -59,7 +62,10 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
if (ct->status & statusbit) {
fl->fl4_src = t->src.u3.ip;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP)
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl->fl_ip_sport = t->src.u.tcp.port;
}
}
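/*
 * Reading t->dst.u.tcp.port (and t->src.u.tcp.port) for UDP, UDP-Lite,
 * DCCP and SCTP as well works because the per-protocol port members
 * live in the same union (nf_conntrack_man_proto), so the 16-bit port
 * occupies the same storage whichever member name is used.
 */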
......@@ -87,21 +93,8 @@ nf_nat_fn(unsigned int hooknum,
have dropped it. Hence it's the user's responsibility to
packet filter it out, or implement conntrack/NAT for that
protocol. 8) --RR */
if (!ct) {
/* Exception: ICMP redirect to new connection (not in
hash table yet). We must not let this through, in
case we're doing NAT to the same network. */
if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
struct icmphdr _hdr, *hp;
hp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(_hdr), &_hdr);
if (hp != NULL &&
hp->type == ICMP_REDIRECT)
return NF_DROP;
}
if (!ct)
return NF_ACCEPT;
}
/* Don't try to NAT if this packet is not conntracked */
if (ct == &nf_conntrack_untracked)
......@@ -109,6 +102,9 @@ nf_nat_fn(unsigned int hooknum,
nat = nfct_nat(ct);
if (!nat) {
/* NAT module was loaded late. */
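/* Adding the NAT extension now would resize ct->ext, which is only
* safe while the conntrack is still unconfirmed (see the assertion
* in __nf_ct_ext_add()); connections confirmed before the NAT module
* was loaded are therefore left un-NATed. */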
if (nf_ct_is_confirmed(ct))
return NF_ACCEPT;
nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
if (nat == NULL) {
pr_debug("failed to add NAT extension\n");
......@@ -134,10 +130,7 @@ nf_nat_fn(unsigned int hooknum,
if (!nf_nat_initialized(ct, maniptype)) {
unsigned int ret;
if (unlikely(nf_ct_is_confirmed(ct)))
/* NAT module was loaded late */
ret = alloc_null_binding_confirmed(ct, hooknum);
else if (hooknum == NF_INET_LOCAL_IN)
if (hooknum == NF_INET_LOCAL_IN)
/* LOCAL_IN hook doesn't have a chain! */
ret = alloc_null_binding(ct, hooknum);
else
......@@ -189,7 +182,7 @@ nf_nat_out(unsigned int hooknum,
int (*okfn)(struct sk_buff *))
{
#ifdef CONFIG_XFRM
struct nf_conn *ct;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
#endif
unsigned int ret;
......@@ -223,7 +216,7 @@ nf_nat_local_fn(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned int ret;
......@@ -252,25 +245,6 @@ nf_nat_local_fn(unsigned int hooknum,
return ret;
}
static unsigned int
nf_nat_adjust(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
ct = nf_ct_get(skb, &ctinfo);
if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
pr_debug("nf_nat_standalone: adjusting sequence number\n");
if (!nf_nat_seq_adjust(skb, ct, ctinfo))
return NF_DROP;
}
return NF_ACCEPT;
}
/* We must be after connection tracking and before packet filtering. */
static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
......@@ -290,14 +264,6 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SRC,
},
/* After conntrack, adjust sequence number */
{
.hook = nf_nat_adjust,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SEQ_ADJUST,
},
/* Before packet filtering, change destination */
{
.hook = nf_nat_local_fn,
......@@ -314,14 +280,6 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
},
/* After conntrack, adjust sequence number */
{
.hook = nf_nat_adjust,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SEQ_ADJUST,
},
};
static int __init nf_nat_standalone_init(void)
......
......@@ -121,12 +121,40 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
}
return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);
static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol)
{
struct ipv6hdr *ip6h = ipv6_hdr(skb);
__wsum hsum;
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (len == skb->len - dataoff)
return nf_ip6_checksum(skb, hook, dataoff, protocol);
/* fall through */
case CHECKSUM_NONE:
hsum = skb_checksum(skb, 0, dataoff, 0);
skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
&ip6h->daddr,
skb->len - dataoff,
protocol,
csum_sub(0, hsum)));
skb->ip_summed = CHECKSUM_NONE;
csum = __skb_checksum_complete_head(skb, dataoff + len);
if (!csum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return csum;
}
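/*
 * In the CHECKSUM_NONE path above, skb->csum is seeded with the IPv6
 * pseudo-header sum minus the sum of the first dataoff bytes; when
 * __skb_checksum_complete_head() then folds in the first dataoff + len
 * bytes, the leading bytes cancel and only the len bytes of transport
 * data are verified against the pseudo-header.  CHECKSUM_COMPLETE can
 * fall back to the full nf_ip6_checksum() only when the partial range
 * covers the whole transport payload.
 */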
static const struct nf_afinfo nf_ip6_afinfo = {
.family = AF_INET6,
.checksum = nf_ip6_checksum,
.checksum_partial = nf_ip6_checksum_partial,
.route = nf_ip6_route,
.saveroute = nf_ip6_saveroute,
.reroute = nf_ip6_reroute,
......
......@@ -325,7 +325,7 @@ static void trace_packet(struct sk_buff *skb,
struct ip6t_entry *e)
{
void *table_base;
struct ip6t_entry *root;
const struct ip6t_entry *root;
char *hookname, *chainname, *comment;
unsigned int rulenum = 0;
......@@ -952,7 +952,7 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
......@@ -979,9 +979,9 @@ copy_entries_to_user(unsigned int total_size,
unsigned int off, num;
struct ip6t_entry *e;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
int ret = 0;
void *loc_cpu_entry;
const void *loc_cpu_entry;
counters = alloc_counters(table);
if (IS_ERR(counters))
......@@ -1001,8 +1001,8 @@ copy_entries_to_user(unsigned int total_size,
/* ... then go back and fix counters and names */
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
unsigned int i;
struct ip6t_entry_match *m;
struct ip6t_entry_target *t;
const struct ip6t_entry_match *m;
const struct ip6t_entry_target *t;
e = (struct ip6t_entry *)(loc_cpu_entry + off);
if (copy_to_user(userptr + off
......@@ -1142,7 +1142,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
"ip6table_%s", name);
if (t && !IS_ERR(t)) {
struct ip6t_getinfo info;
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
if (compat) {
......@@ -1206,7 +1206,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
else {
duprintf("get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
module_put(t->me);
xt_table_unlock(t);
......@@ -1225,7 +1225,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct xt_table *t;
struct xt_table_info *oldinfo;
struct xt_counters *counters;
void *loc_cpu_old_entry;
const void *loc_cpu_old_entry;
ret = 0;
counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
......@@ -1369,9 +1369,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
int size;
void *ptmp;
struct xt_table *t;
struct xt_table_info *private;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
const void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
struct compat_xt_counters_info compat_tmp;
......@@ -1905,11 +1905,11 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
void __user *userptr)
{
struct xt_counters *counters;
struct xt_table_info *private = table->private;
const struct xt_table_info *private = table->private;
void __user *pos;
unsigned int size;
int ret = 0;
void *loc_cpu_entry;
const void *loc_cpu_entry;
unsigned int i = 0;
counters = alloc_counters(table);
......@@ -1956,7 +1956,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
xt_compat_lock(AF_INET6);
t = xt_find_table_lock(net, AF_INET6, get.name);
if (t && !IS_ERR(t)) {
struct xt_table_info *private = t->private;
const struct xt_table_info *private = t->private;
struct xt_table_info info;
duprintf("t->private->number = %u\n", private->number);
ret = compat_table_info(private, &info);
......@@ -1966,7 +1966,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
} else if (!ret) {
duprintf("compat_get_entries: I've got %u not %u!\n",
private->size, get.size);
ret = -EINVAL;
ret = -EAGAIN;
}
xt_compat_flush_offsets(AF_INET6);
module_put(t->me);
......@@ -2155,7 +2155,8 @@ icmp6_match(const struct sk_buff *skb,
unsigned int protoff,
bool *hotdrop)
{
struct icmp6hdr _icmph, *ic;
const struct icmp6hdr *ic;
struct icmp6hdr _icmph;
const struct ip6t_icmp *icmpinfo = matchinfo;
/* Must not be a fragment. */
......
......@@ -363,11 +363,15 @@ static void dump_packet(const struct nf_loginfo *info,
if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
read_lock_bh(&skb->sk->sk_callback_lock);
if (skb->sk->sk_socket && skb->sk->sk_socket->file)
printk("UID=%u GID=%u",
printk("UID=%u GID=%u ",
skb->sk->sk_socket->file->f_uid,
skb->sk->sk_socket->file->f_gid);
read_unlock_bh(&skb->sk->sk_callback_lock);
}
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!recurse && skb->mark)
printk("MARK=0x%x ", skb->mark);
}
static struct nf_loginfo default_loginfo = {
......
......@@ -41,7 +41,8 @@ static void send_reset(struct sk_buff *oldskb)
struct tcphdr otcph, *tcph;
unsigned int otcplen, hh_len;
int tcphoff, needs_ack;
struct ipv6hdr *oip6h = ipv6_hdr(oldskb), *ip6h;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
struct ipv6hdr *ip6h;
struct dst_entry *dst = NULL;
u8 proto;
struct flowi fl;
......
......@@ -49,7 +49,8 @@ ipv6header_mt6(const struct sk_buff *skb, const struct net_device *in,
temp = 0;
while (ip6t_ext_hdr(nexthdr)) {
struct ipv6_opt_hdr _hdr, *hp;
const struct ipv6_opt_hdr *hp;
struct ipv6_opt_hdr _hdr;
int hdrlen;
/* Is there enough space for the next ext header? */
......
......@@ -110,7 +110,8 @@ rt_mt6(const struct sk_buff *skb, const struct net_device *in,
!!(rtinfo->invflags & IP6T_RT_INV_TYP)));
if (ret && (rtinfo->flags & IP6T_RT_RES)) {
u_int32_t *rp, _reserved;
const u_int32_t *rp;
u_int32_t _reserved;
rp = skb_header_pointer(skb,
ptr + offsetof(struct rt0_hdr,
reserved),
......
......@@ -54,7 +54,7 @@ static struct
static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_filter.lock),
.me = THIS_MODULE,
.af = AF_INET6,
};
......
......@@ -60,7 +60,7 @@ static struct
static struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_mangler.lock),
.me = THIS_MODULE,
.af = AF_INET6,
};
......
......@@ -38,7 +38,7 @@ static struct
static struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.lock = __RW_LOCK_UNLOCKED(packet_raw.lock),
.me = THIS_MODULE,
.af = AF_INET6,
};
......
......@@ -27,7 +27,7 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple)
{
const u_int32_t *ap;
......@@ -36,21 +36,21 @@ static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr),
sizeof(_addrs), _addrs);
if (ap == NULL)
return 0;
return false;
memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
return 1;
return true;
}
static int ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6));
memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6));
return 1;
return true;
}
static int ipv6_print_tuple(struct seq_file *s,
......
......@@ -28,7 +28,7 @@
static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
static int icmpv6_pkt_to_tuple(const struct sk_buff *skb,
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
......@@ -37,12 +37,12 @@ static int icmpv6_pkt_to_tuple(const struct sk_buff *skb,
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
return 0;
return false;
tuple->dst.u.icmp.type = hp->icmp6_type;
tuple->src.u.icmp.id = hp->icmp6_identifier;
tuple->dst.u.icmp.code = hp->icmp6_code;
return 1;
return true;
}
/* Add 1; spaces filled with 0. */
......@@ -53,17 +53,17 @@ static const u_int8_t invmap[] = {
[ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1
};
static int icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
int type = orig->dst.u.icmp.type - 128;
if (type < 0 || type >= sizeof(invmap) || !invmap[type])
return 0;
return false;
tuple->src.u.icmp.id = orig->src.u.icmp.id;
tuple->dst.u.icmp.type = invmap[type] - 1;
tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
return 1;
return true;
}
/* Print out the per-protocol part of the tuple. */
......@@ -102,8 +102,7 @@ static int icmpv6_packet(struct nf_conn *ct,
}
/* Called when a new connection for this protocol found. */
static int icmpv6_new(struct nf_conn *ct,
const struct sk_buff *skb,
static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
{
static const u_int8_t valid_new[] = {
......@@ -116,11 +115,11 @@ static int icmpv6_new(struct nf_conn *ct,
/* Can't create a new ICMPv6 `conn' with this. */
pr_debug("icmpv6: can't create new conn with type %u\n",
type + 128);
NF_CT_DUMP_TUPLE(&ct->tuplehash[0].tuple);
return 0;
nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
return false;
}
atomic_set(&ct->proto.icmp.count, 0);
return 1;
return true;
}
static int
......
......@@ -103,8 +103,8 @@ struct ctl_table nf_ct_ipv6_sysctl_table[] = {
};
#endif
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
struct in6_addr *daddr)
static unsigned int ip6qhashfn(__be32 id, const struct in6_addr *saddr,
const struct in6_addr *daddr)
{
u32 a, b, c;
......@@ -132,7 +132,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
static unsigned int nf_hashfn(struct inet_frag_queue *q)
{
struct nf_ct_frag6_queue *nq;
const struct nf_ct_frag6_queue *nq;
nq = container_of(q, struct nf_ct_frag6_queue, q);
return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr);
......@@ -222,7 +222,7 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff)
const struct frag_hdr *fhdr, int nhoff)
{
struct sk_buff *prev, *next;
int offset, end;
......
......@@ -86,6 +86,16 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
config NF_CT_PROTO_DCCP
tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
depends on EXPERIMENTAL && NF_CONNTRACK
depends on NETFILTER_ADVANCED
help
With this option enabled, the layer 3 independent connection
tracking code will be able to do state tracking on DCCP connections.
If unsure, say 'N'.
config NF_CT_PROTO_GRE
tristate
depends on NF_CONNTRACK
......
......@@ -13,6 +13,7 @@ obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
# optional layer 4 protocol connection tracking helpers
obj-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
......
......@@ -53,7 +53,7 @@ enum amanda_strings {
};
static struct {
char *string;
const char *string;
size_t len;
struct ts_config *ts;
} search[] __read_mostly = {
......@@ -91,7 +91,6 @@ static int amanda_help(struct sk_buff *skb,
char pbuf[sizeof("65535")], *tmp;
u_int16_t len;
__be16 port;
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
int ret = NF_ACCEPT;
typeof(nf_nat_amanda_hook) nf_nat_amanda;
......@@ -148,7 +147,8 @@ static int amanda_help(struct sk_buff *skb,
goto out;
}
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, family,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
nf_ct_l3num(ct),
&tuple->src.u3, &tuple->dst.u3,
IPPROTO_TCP, NULL, &port);
......
......@@ -94,7 +94,7 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
nf_conntrack_hash_rnd);
}
int
bool
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
unsigned int dataoff,
......@@ -108,7 +108,7 @@ nf_ct_get_tuple(const struct sk_buff *skb,
tuple->src.l3num = l3num;
if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
return 0;
return false;
tuple->dst.protonum = protonum;
tuple->dst.dir = IP_CT_DIR_ORIGINAL;
......@@ -117,10 +117,8 @@ nf_ct_get_tuple(const struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
int nf_ct_get_tuplepr(const struct sk_buff *skb,
unsigned int nhoff,
u_int16_t l3num,
struct nf_conntrack_tuple *tuple)
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
......@@ -134,7 +132,7 @@ int nf_ct_get_tuplepr(const struct sk_buff *skb,
ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
if (ret != NF_ACCEPT) {
rcu_read_unlock();
return 0;
return false;
}
l4proto = __nf_ct_l4proto_find(l3num, protonum);
......@@ -147,7 +145,7 @@ int nf_ct_get_tuplepr(const struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
int
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_l3proto *l3proto,
......@@ -157,7 +155,7 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
inverse->src.l3num = orig->src.l3num;
if (l3proto->invert_tuple(inverse, orig) == 0)
return 0;
return false;
inverse->dst.dir = !orig->dst.dir;
......@@ -194,8 +192,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
* destroy_conntrack() MUST NOT be called with a write lock
* to nf_conntrack_lock!!! -HW */
rcu_read_lock();
l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto && l4proto->destroy)
l4proto->destroy(ct);
......@@ -739,10 +736,10 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig)
{
int ret;
bool ret;
rcu_read_lock();
ret = nf_ct_invert_tuple(inverse, orig,
......@@ -766,10 +763,10 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
pr_debug("Altering reply tuple of %p to ", ct);
NF_CT_DUMP_TUPLE(newreply);
nf_ct_dump_tuple(newreply);
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
if (ct->master || (help && help->expecting != 0))
if (ct->master || (help && !hlist_empty(&help->expectations)))
return;
rcu_read_lock();
......
......@@ -71,6 +71,9 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
int i, newlen, newoff;
struct nf_ct_ext_type *t;
/* Conntrack must not be confirmed to avoid races on reallocation. */
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
if (!ct->ext)
return nf_ct_ext_create(&ct->ext, id, gfp);
......
......@@ -350,8 +350,9 @@ static int help(struct sk_buff *skb,
enum ip_conntrack_info ctinfo)
{
unsigned int dataoff, datalen;
struct tcphdr _tcph, *th;
char *fb_ptr;
const struct tcphdr *th;
struct tcphdr _tcph;
const char *fb_ptr;
int ret;
u32 seq;
int dir = CTINFO2DIR(ctinfo);
......@@ -405,7 +406,7 @@ static int help(struct sk_buff *skb,
/* Initialize IP/IPv6 addr to expected address (it's not mentioned
in EPSV responses) */
cmd.l3num = ct->tuplehash[dir].tuple.src.l3num;
cmd.l3num = nf_ct_l3num(ct);
memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
sizeof(cmd.u3.all));
......@@ -452,7 +453,7 @@ static int help(struct sk_buff *skb,
daddr = &ct->tuplehash[!dir].tuple.dst.u3;
/* Update the ftp info */
if ((cmd.l3num == ct->tuplehash[dir].tuple.src.l3num) &&
if ((cmd.l3num == nf_ct_l3num(ct)) &&
memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
sizeof(cmd.u3.all))) {
/* Enrico Scholz's passive FTP to partially RNAT'd ftp
......
......@@ -218,7 +218,6 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
union nf_inet_addr *addr, __be16 *port)
{
const unsigned char *p;
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
int len;
if (taddr->choice != eH245_TransportAddress_unicastAddress)
......@@ -226,13 +225,13 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
switch (taddr->unicastAddress.choice) {
case eUnicastAddress_iPAddress:
if (family != AF_INET)
if (nf_ct_l3num(ct) != AF_INET)
return 0;
p = data + taddr->unicastAddress.iPAddress.network;
len = 4;
break;
case eUnicastAddress_iP6Address:
if (family != AF_INET6)
if (nf_ct_l3num(ct) != AF_INET6)
return 0;
p = data + taddr->unicastAddress.iP6Address.network;
len = 16;
......@@ -277,8 +276,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
/* Create expect for RTP */
if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_UDP, NULL, &rtp_port);
......@@ -288,8 +286,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
nf_ct_expect_put(rtp_exp);
return -1;
}
nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_UDP, NULL, &rtcp_port);
......@@ -306,9 +303,9 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
if (nf_ct_expect_related(rtp_exp) == 0) {
if (nf_ct_expect_related(rtcp_exp) == 0) {
pr_debug("nf_ct_h323: expect RTP ");
NF_CT_DUMP_TUPLE(&rtp_exp->tuple);
nf_ct_dump_tuple(&rtp_exp->tuple);
pr_debug("nf_ct_h323: expect RTCP ");
NF_CT_DUMP_TUPLE(&rtcp_exp->tuple);
nf_ct_dump_tuple(&rtcp_exp->tuple);
} else {
nf_ct_unexpect_related(rtp_exp);
ret = -1;
......@@ -346,8 +343,7 @@ static int expect_t120(struct sk_buff *skb,
/* Create expect for T.120 connections */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_TCP, NULL, &port);
......@@ -364,7 +360,7 @@ static int expect_t120(struct sk_buff *skb,
} else { /* Conntrack only */
if (nf_ct_expect_related(exp) == 0) {
pr_debug("nf_ct_h323: expect T.120 ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
}
......@@ -586,7 +582,7 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
while (get_tpkt_data(skb, protoff, ct, ctinfo,
&data, &datalen, &dataoff)) {
pr_debug("nf_ct_h245: TPKT len=%d ", datalen);
NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
/* Decode H.245 signal */
ret = DecodeMultimediaSystemControlMessage(data, datalen,
......@@ -634,18 +630,17 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
union nf_inet_addr *addr, __be16 *port)
{
const unsigned char *p;
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
int len;
switch (taddr->choice) {
case eTransportAddress_ipAddress:
if (family != AF_INET)
if (nf_ct_l3num(ct) != AF_INET)
return 0;
p = data + taddr->ipAddress.ip;
len = 4;
break;
case eTransportAddress_ip6Address:
if (family != AF_INET6)
if (nf_ct_l3num(ct) != AF_INET6)
return 0;
p = data + taddr->ip6Address.ip;
len = 16;
......@@ -683,8 +678,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
/* Create expect for h245 connection */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
IPPROTO_TCP, NULL, &port);
......@@ -701,7 +695,7 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
} else { /* Conntrack only */
if (nf_ct_expect_related(exp) == 0) {
pr_debug("nf_ct_q931: expect H.245 ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
}
......@@ -792,7 +786,7 @@ static int expect_callforwarding(struct sk_buff *skb,
* we don't need to track the second call */
if (callforward_filter &&
callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3,
ct->tuplehash[!dir].tuple.src.l3num)) {
nf_ct_l3num(ct))) {
pr_debug("nf_ct_q931: Call Forwarding not tracked\n");
return 0;
}
......@@ -800,8 +794,7 @@ static int expect_callforwarding(struct sk_buff *skb,
/* Create expect for the second call leg */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3, &addr,
IPPROTO_TCP, NULL, &port);
exp->helper = nf_conntrack_helper_q931;
......@@ -817,7 +810,7 @@ static int expect_callforwarding(struct sk_buff *skb,
} else { /* Conntrack only */
if (nf_ct_expect_related(exp) == 0) {
pr_debug("nf_ct_q931: expect Call Forwarding ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
}
......@@ -1137,7 +1130,7 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
while (get_tpkt_data(skb, protoff, ct, ctinfo,
&data, &datalen, &dataoff)) {
pr_debug("nf_ct_q931: TPKT len=%d ", datalen);
NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
/* Decode Q.931 signal */
ret = DecodeQ931(data, datalen, &q931);
......@@ -1272,8 +1265,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
/* Create expect for Q.931 */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
gkrouted_only ? /* only accept calls from GK? */
&ct->tuplehash[!dir].tuple.src.u3 : NULL,
&ct->tuplehash[!dir].tuple.dst.u3,
......@@ -1287,7 +1279,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
} else { /* Conntrack only */
if (nf_ct_expect_related(exp) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
/* Save port for looking up expect in processing RCF */
info->sig_port[dir] = port;
......@@ -1344,15 +1336,14 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
/* Need new expect */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3, &addr,
IPPROTO_UDP, NULL, &port);
exp->helper = nf_conntrack_helper_ras;
if (nf_ct_expect_related(exp) == 0) {
pr_debug("nf_ct_ras: expect RAS ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
......@@ -1436,7 +1427,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
pr_debug("nf_ct_ras: set Q.931 expect "
"timeout to %u seconds for",
info->timeout);
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
set_expect_timeout(exp, info->timeout);
}
spin_unlock_bh(&nf_conntrack_lock);
......@@ -1549,8 +1540,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
/* Need new expect */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3, &addr,
IPPROTO_TCP, NULL, &port);
exp->flags = NF_CT_EXPECT_PERMANENT;
......@@ -1558,7 +1548,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
if (nf_ct_expect_related(exp) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
......@@ -1603,8 +1593,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
/* Need new expect for call signal */
if ((exp = nf_ct_expect_alloc(ct)) == NULL)
return -1;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[!dir].tuple.src.l3num,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&ct->tuplehash[!dir].tuple.src.u3, &addr,
IPPROTO_TCP, NULL, &port);
exp->flags = NF_CT_EXPECT_PERMANENT;
......@@ -1612,7 +1601,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
if (nf_ct_expect_related(exp) == 0) {
pr_debug("nf_ct_ras: expect Q.931 ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
} else
ret = -1;
......@@ -1716,7 +1705,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
if (data == NULL)
goto accept;
pr_debug("nf_ct_ras: RAS message len=%d ", datalen);
NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
nf_ct_dump_tuple(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple);
/* Decode RAS message */
ret = DecodeRasMessage(data, datalen, &ras);
......
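Throughout the H.323/Q.931/RAS helper hunks above, the open-coded ct->tuplehash[...] lookups are replaced by the nf_ct_l3num() accessor and the NF_CT_DUMP_TUPLE macro by the nf_ct_dump_tuple() inline. As a point of reference, here is a minimal sketch of what these helpers presumably reduce to, inferred from the removed expressions in this diff (nf_ct_protonum() is included because later hunks use it; the per-family dump bodies are elided):
/* Sketch only: inferred from the replaced expressions above, not copied
 * verbatim from the kernel headers. */
static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct)
{
	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
}
static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct)
{
	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
}
static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t)
{
	/* Assumed to dispatch on the L3 protocol and pr_debug() the tuple,
	 * replacing the old NF_CT_DUMP_TUPLE() macro. */
	switch (t->src.l3num) {
	case AF_INET:
		nf_ct_dump_tuple_ip(t);
		break;
	case AF_INET6:
		nf_ct_dump_tuple_ipv6(t);
		break;
	}
}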
......@@ -126,7 +126,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_expect *exp;
struct hlist_node *n, *next;
const struct hlist_node *n, *next;
unsigned int i;
mutex_lock(&nf_ct_helper_mutex);
......
......@@ -50,7 +50,7 @@ MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per "
module_param(dcc_timeout, uint, 0400);
MODULE_PARM_DESC(dcc_timeout, "timeout on for unestablished DCC channels");
static const char *dccprotos[] = {
static const char *const dccprotos[] = {
"SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT "
};
......@@ -65,7 +65,7 @@ static const char *dccprotos[] = {
* ad_beg_p returns pointer to first byte of addr data
* ad_end_p returns pointer to last byte of addr data
*/
static int parse_dcc(char *data, char *data_end, u_int32_t *ip,
static int parse_dcc(char *data, const char *data_end, u_int32_t *ip,
u_int16_t *port, char **ad_beg_p, char **ad_end_p)
{
/* at least 12: "AAAAAAAA P\1\n" */
......@@ -93,9 +93,11 @@ static int help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
unsigned int dataoff;
struct iphdr *iph;
struct tcphdr _tcph, *th;
char *data, *data_limit, *ib_ptr;
const struct iphdr *iph;
const struct tcphdr *th;
struct tcphdr _tcph;
const char *data_limit;
char *data, *ib_ptr;
int dir = CTINFO2DIR(ctinfo);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
......@@ -159,7 +161,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
/* we have at least
* (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
* data left (== 14/13 bytes) */
if (parse_dcc((char *)data, data_limit, &dcc_ip,
if (parse_dcc(data, data_limit, &dcc_ip,
&dcc_port, &addr_beg_p, &addr_end_p)) {
pr_debug("unable to parse dcc command\n");
continue;
......
......@@ -31,22 +31,22 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
static bool generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple)
{
memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
return 1;
return true;
}
static int generic_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
return 1;
return true;
}
static int generic_print_tuple(struct seq_file *s,
......
......@@ -145,10 +145,11 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
static inline int
ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct)
{
struct nf_conntrack_l4proto *l4proto = nf_ct_l4proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
struct nf_conntrack_l4proto *l4proto;
struct nlattr *nest_proto;
int ret;
l4proto = nf_ct_l4proto_find_get(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (!l4proto->to_nlattr) {
nf_ct_l4proto_put(l4proto);
return 0;
......@@ -368,8 +369,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
nfmsg = NLMSG_DATA(nlh);
nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
nfmsg->nfgen_family =
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
nfmsg->nfgen_family = nf_ct_l3num(ct);
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
......@@ -454,7 +454,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
nfmsg = NLMSG_DATA(nlh);
nlh->nlmsg_flags = flags;
nfmsg->nfgen_family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
nfmsg->nfgen_family = nf_ct_l3num(ct);
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
......@@ -535,8 +535,6 @@ static int ctnetlink_done(struct netlink_callback *cb)
return 0;
}
#define L3PROTO(ct) (ct)->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
......@@ -558,7 +556,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
/* Dump entries of a given L3 protocol number.
* If it is not specified, ie. l3proto == 0,
* then dump everything. */
if (l3proto && L3PROTO(ct) != l3proto)
if (l3proto && nf_ct_l3num(ct) != l3proto)
continue;
if (cb->args[1]) {
if (ct != last)
......@@ -704,20 +702,11 @@ static int nfnetlink_parse_nat_proto(struct nlattr *attr,
if (err < 0)
return err;
npt = nf_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
if (!npt->nlattr_to_range) {
nf_nat_proto_put(npt);
return 0;
}
/* nlattr_to_range returns 1 if it parsed, 0 if not, neg. on error */
if (npt->nlattr_to_range(tb, range) > 0)
range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
if (npt->nlattr_to_range)
err = npt->nlattr_to_range(tb, range);
nf_nat_proto_put(npt);
return 0;
return err;
}
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
......@@ -1010,14 +999,11 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[])
{
struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO];
struct nf_conntrack_l4proto *l4proto;
u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
u_int16_t l3num = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
int err = 0;
nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL);
l4proto = nf_ct_l4proto_find_get(l3num, npt);
l4proto = nf_ct_l4proto_find_get(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto->from_nlattr)
err = l4proto->from_nlattr(tb, ct);
nf_ct_l4proto_put(l4proto);
......
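The reworked nfnetlink_parse_nat_proto() above no longer sets IP_NAT_RANGE_PROTO_SPECIFIED itself; it now just forwards whatever the protocol's nlattr_to_range() returns, so the flag setting presumably moves into the per-protocol handlers. A hedged sketch of what such a handler might look like for a port-based protocol (the function name is hypothetical and the body is an assumption inferred from the old code, not part of this patch):
/* Assumed shape of a per-protocol nlattr_to_range() after the change:
 * it parses its own attributes, sets IP_NAT_RANGE_PROTO_SPECIFIED, and
 * returns 0 or a negative errno that the caller now propagates. */
static int example_port_nlattr_to_range(struct nlattr *tb[],
					struct nf_nat_range *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max.all = range->min.all;
		range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}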
......@@ -119,7 +119,7 @@ static void pptp_expectfn(struct nf_conn *ct,
/* obviously this tuple inversion only works until you do NAT */
nf_ct_invert_tuplepr(&inv_t, &exp->tuple);
pr_debug("trying to unexpect other dir: ");
NF_CT_DUMP_TUPLE(&inv_t);
nf_ct_dump_tuple(&inv_t);
exp_other = nf_ct_expect_find_get(&inv_t);
if (exp_other) {
......@@ -141,7 +141,7 @@ static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
struct nf_conn *sibling;
pr_debug("trying to timeout ct or exp for tuple ");
NF_CT_DUMP_TUPLE(t);
nf_ct_dump_tuple(t);
h = nf_conntrack_find_get(t);
if (h) {
......@@ -209,7 +209,7 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
/* original direction, PNS->PAC */
dir = IP_CT_DIR_ORIGINAL;
nf_ct_expect_init(exp_orig, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[dir].tuple.src.l3num,
nf_ct_l3num(ct),
&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[dir].tuple.dst.u3,
IPPROTO_GRE, &peer_callid, &callid);
......@@ -218,7 +218,7 @@ static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
/* reply direction, PAC->PNS */
dir = IP_CT_DIR_REPLY;
nf_ct_expect_init(exp_reply, NF_CT_EXPECT_CLASS_DEFAULT,
ct->tuplehash[dir].tuple.src.l3num,
nf_ct_l3num(ct),
&ct->tuplehash[dir].tuple.src.u3,
&ct->tuplehash[dir].tuple.dst.u3,
IPPROTO_GRE, &callid, &peer_callid);
......
......@@ -146,18 +146,15 @@ EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
static int kill_l3proto(struct nf_conn *i, void *data)
{
return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
((struct nf_conntrack_l3proto *)data)->l3proto);
return nf_ct_l3num(i) == ((struct nf_conntrack_l3proto *)data)->l3proto;
}
static int kill_l4proto(struct nf_conn *i, void *data)
{
struct nf_conntrack_l4proto *l4proto;
l4proto = (struct nf_conntrack_l4proto *)data;
return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
l4proto->l4proto) &&
(i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
l4proto->l3proto);
return nf_ct_protonum(i) == l4proto->l4proto &&
nf_ct_l3num(i) == l4proto->l3proto;
}
static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto)
......
......@@ -14,23 +14,23 @@
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
static int generic_pkt_to_tuple(const struct sk_buff *skb,
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
tuple->src.u.all = 0;
tuple->dst.u.all = 0;
return 1;
return true;
}
static int generic_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool generic_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
tuple->src.u.all = 0;
tuple->dst.u.all = 0;
return 1;
return true;
}
/* Print out the per-protocol part of the tuple. */
......@@ -53,10 +53,10 @@ static int packet(struct nf_conn *ct,
}
/* Called when a new connection for this protocol found. */
static int new(struct nf_conn *ct, const struct sk_buff *skb,
static bool new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
{
return 1;
return true;
}
#ifdef CONFIG_SYSCTL
......
......@@ -82,7 +82,7 @@ static __be16 gre_keymap_lookup(struct nf_conntrack_tuple *t)
read_unlock_bh(&nf_ct_gre_lock);
pr_debug("lookup src key 0x%x for ", key);
NF_CT_DUMP_TUPLE(t);
nf_ct_dump_tuple(t);
return key;
}
......@@ -113,7 +113,7 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
*kmp = km;
pr_debug("adding new entry %p: ", km);
NF_CT_DUMP_TUPLE(&km->tuple);
nf_ct_dump_tuple(&km->tuple);
write_lock_bh(&nf_ct_gre_lock);
list_add_tail(&km->list, &gre_keymap_list);
......@@ -148,17 +148,16 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);
/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
/* invert gre part of tuple */
static int gre_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
tuple->dst.u.gre.key = orig->src.u.gre.key;
tuple->src.u.gre.key = orig->dst.u.gre.key;
return 1;
return true;
}
/* gre hdr info to tuple */
static int gre_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
const struct gre_hdr_pptp *pgrehdr;
......@@ -173,24 +172,24 @@ static int gre_pkt_to_tuple(const struct sk_buff *skb,
/* try to behave like "nf_conntrack_proto_generic" */
tuple->src.u.all = 0;
tuple->dst.u.all = 0;
return 1;
return true;
}
/* PPTP header is variable length, only need up to the call_id field */
pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
if (!pgrehdr)
return 1;
return true;
if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
pr_debug("GRE_VERSION_PPTP but unknown proto\n");
return 0;
return false;
}
tuple->dst.u.gre.key = pgrehdr->call_id;
srckey = gre_keymap_lookup(tuple);
tuple->src.u.gre.key = srckey;
return 1;
return true;
}
/* print gre part of tuple */
......@@ -235,18 +234,18 @@ static int gre_packet(struct nf_conn *ct,
}
/* Called when a new connection for this protocol found. */
static int gre_new(struct nf_conn *ct, const struct sk_buff *skb,
static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
{
pr_debug(": ");
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
/* initialize to sane value. Ideally a conntrack helper
* (e.g. in case of pptp) is increasing them */
ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
ct->proto.gre.timeout = GRE_TIMEOUT;
return 1;
return true;
}
/* Called when a conntrack entry has already been removed from the hashes
......
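The generic and GRE trackers above (and the UDP tracker below) switch their pkt_to_tuple/invert_tuple/new callbacks from int to bool. For context, a sketch of how the corresponding slots in struct nf_conntrack_l4proto presumably look after this conversion; only the affected members are shown, and the struct name is a placeholder, not the real definition:
/* Sketch: affected callback slots only, with the new bool return types. */
struct nf_conntrack_l4proto_bool_slots_sketch {
	bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
			     struct nf_conntrack_tuple *tuple);
	bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
			     const struct nf_conntrack_tuple *orig);
	bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
		    unsigned int dataoff);
};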
......@@ -26,7 +26,7 @@
static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
static int udp_pkt_to_tuple(const struct sk_buff *skb,
static bool udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
......@@ -36,20 +36,20 @@ static int udp_pkt_to_tuple(const struct sk_buff *skb,
/* Actually only need first 8 bytes. */
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
return 0;
return false;
tuple->src.u.udp.port = hp->source;
tuple->dst.u.udp.port = hp->dest;
return 1;
return true;
}
static int udp_invert_tuple(struct nf_conntrack_tuple *tuple,
static bool udp_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
tuple->src.u.udp.port = orig->dst.u.udp.port;
tuple->dst.u.udp.port = orig->src.u.udp.port;
return 1;
return true;
}
/* Print out the per-protocol part of the tuple. */
......@@ -83,10 +83,10 @@ static int udp_packet(struct nf_conn *ct,
}
/* Called when a new connection for this protocol found. */
static int udp_new(struct nf_conn *ct, const struct sk_buff *skb,
static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
{
return 1;
return true;
}
static int udp_error(struct sk_buff *skb, unsigned int dataoff,
......
......@@ -72,7 +72,6 @@ static int help(struct sk_buff *skb,
struct nf_conntrack_tuple *tuple;
struct sane_request *req;
struct sane_reply_net_start *reply;
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
/* Until there's been traffic both ways, don't look in packets. */
......@@ -143,12 +142,12 @@ static int help(struct sk_buff *skb,
}
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, family,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
&tuple->src.u3, &tuple->dst.u3,
IPPROTO_TCP, NULL, &reply->port);
pr_debug("nf_ct_sane: expect: ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
/* Can't expect this? Best to drop packet now. */
if (nf_ct_expect_related(exp) != 0)
......
......@@ -44,7 +44,6 @@ static int tftp_help(struct sk_buff *skb,
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
unsigned int ret = NF_ACCEPT;
int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
typeof(nf_nat_tftp_hook) nf_nat_tftp;
tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr),
......@@ -56,19 +55,20 @@ static int tftp_help(struct sk_buff *skb,
case TFTP_OPCODE_READ:
case TFTP_OPCODE_WRITE:
/* RRQ and WRQ works the same way */
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
exp = nf_ct_expect_alloc(ct);
if (exp == NULL)
return NF_DROP;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, family,
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
nf_ct_l3num(ct),
&tuple->src.u3, &tuple->dst.u3,
IPPROTO_UDP, NULL, &tuple->dst.u.udp.port);
pr_debug("expect: ");
NF_CT_DUMP_TUPLE(&exp->tuple);
nf_ct_dump_tuple(&exp->tuple);
nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
......
......@@ -55,7 +55,7 @@ static void secmark_save(const struct sk_buff *skb)
static void secmark_restore(struct sk_buff *skb)
{
if (!skb->secmark) {
struct nf_conn *ct;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
ct = nf_ct_get(skb, &ctinfo);
......
......@@ -96,7 +96,7 @@ xt_rateest_tg_checkentry(const char *tablename,
void *targinfo,
unsigned int hook_mask)
{
struct xt_rateest_target_info *info = (void *)targinfo;
struct xt_rateest_target_info *info = targinfo;
struct xt_rateest *est;
struct {
struct nlattr opt;
......
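In xt_rateest_tg_checkentry() the explicit cast on targinfo is dropped: in C, a void * converts implicitly to any object pointer type, so the cast added nothing. A tiny standalone illustration with hypothetical names:
struct rateest_info_example {
	unsigned int interval;
};

static void checkentry_example(void *targinfo)
{
	/* void * assigns to a typed pointer without a cast in C. */
	struct rateest_info_example *info = targinfo;

	info->interval = 0;
}

int main(void)
{
	struct rateest_info_example info = { .interval = 4 };

	checkentry_example(&info);
	return (int)info.interval;
}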