Commit e072d3e0 authored by Linus Torvalds

Cset exclude: bdschuym@pandora.be|ChangeSet|20050310042817|04711

parent 771a7305
@@ -18,8 +18,7 @@
 #define NF_STOLEN 2
 #define NF_QUEUE 3
 #define NF_REPEAT 4
-#define NF_STOP 5
-#define NF_MAX_VERDICT NF_STOP
 
 /* Generic cache responses from hook functions.
    <= 0x2000 is used for protocol-flags. */
+#define NF_MAX_VERDICT NF_REPEAT
@@ -139,32 +138,21 @@ void nf_log_packet(int pf,
 /* This is gross, but inline doesn't cut it for avoiding the function
    call in fast path: gcc doesn't inline (needs value tracking?). --RR */
 #ifdef CONFIG_NETFILTER_DEBUG
 #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
-({int __ret; \
-if ((__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, INT_MIN)) == 1) \
-	__ret = (okfn)(skb); \
-__ret;})
-#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
-({int __ret; \
-if ((__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1) \
-	__ret = (okfn)(skb); \
-__ret;})
+ nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN)
+#define NF_HOOK_THRESH nf_hook_slow
 #else
 #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
-({int __ret; \
-if (list_empty(&nf_hooks[pf][hook]) || \
-    (__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, INT_MIN)) == 1) \
-	__ret = (okfn)(skb); \
-__ret;})
-#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
-({int __ret; \
-if (list_empty(&nf_hooks[pf][hook]) || \
-    (__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1) \
-	__ret = (okfn)(skb); \
-__ret;})
+(list_empty(&nf_hooks[(pf)][(hook)]) \
+ ? (okfn)(skb) \
+ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN))
+#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
+(list_empty(&nf_hooks[(pf)][(hook)]) \
+ ? (okfn)(skb) \
+ : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), (thresh)))
 #endif
 
-int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
+int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
 		 struct net_device *indev, struct net_device *outdev,
 		 int (*okfn)(struct sk_buff *), int thresh);
...
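The hunk above restores the pre-NF_STOP shape of NF_HOOK: in the non-debug build the macro calls okfn() directly when no hooks are registered for the (pf, hook) pair, and otherwise falls into nf_hook_slow(), which runs the hooks and invokes okfn() itself on NF_ACCEPT. The reverted variant instead had nf_hook_slow() return 1 and made the macro call okfn() at the call site. Below is a minimal user-space sketch of the restored control flow; every identifier in it (TOY_NF_HOOK, toy_nf_hook_slow, hooks_registered, deliver) is an illustrative stand-in, not the kernel API.

#include <stdio.h>

struct sk_buff { const char *data; };          /* toy stand-in for the kernel skb */

static int hooks_registered;                   /* models !list_empty(&nf_hooks[pf][hook]) */

static int deliver(struct sk_buff *skb)        /* plays the role of okfn() */
{
	printf("okfn: delivering %s\n", skb->data);
	return 0;
}

/* Toy slow path: pretend every registered hook returns NF_ACCEPT, so the
 * slow path itself hands the skb to okfn(), as the restored code does. */
static int toy_nf_hook_slow(struct sk_buff *skb, int (*okfn)(struct sk_buff *))
{
	printf("slow path: traversing hooks\n");
	return okfn(skb);
}

/* Restored non-debug expansion: the fast path calls okfn() directly when the
 * hook list is empty, otherwise the slow path finishes the job. */
#define TOY_NF_HOOK(skb, okfn) \
	(!hooks_registered ? (okfn)(skb) : toy_nf_hook_slow((skb), (okfn)))

int main(void)
{
	struct sk_buff skb = { "a packet" };

	hooks_registered = 0;
	TOY_NF_HOOK(&skb, deliver);    /* fast path: okfn() runs immediately */

	hooks_registered = 1;
	TOY_NF_HOOK(&skb, deliver);    /* slow path: hooks run, then okfn() */
	return 0;
}

With this shape, the only overhead when no hooks are registered is the emptiness test; okfn() is invoked inline without entering the netfilter core.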
@@ -829,7 +829,8 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff **pskb,
 {
 	if ((*pskb)->nf_bridge &&
 	    !((*pskb)->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
-		return NF_STOP;
+		okfn(*pskb);
+		return NF_STOLEN;
 	}
 
 	return NF_ACCEPT;
@@ -890,7 +891,8 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
 		if (out->priv_flags & IFF_802_1Q_VLAN)
 			nf_bridge->netoutdev = (struct net_device *)out;
 #endif
-		return NF_STOP;
+		okfn(skb);
+		return NF_STOLEN;
 	}
 
 	return NF_ACCEPT;
...
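With NF_STOP removed, the two bridge hooks above can no longer return a verdict that means "stop traversal here and let the caller continue". They get the same effect by calling okfn() on the packet themselves and returning NF_STOLEN, which tells the core that the skb has been taken over and needs no further handling. The sketch below shows that pattern in isolation; the names (toy_sabotage_hook, deliver, already_seen) are illustrative, not the bridge-netfilter code.

#include <stdio.h>

enum verdict { NF_DROP, NF_ACCEPT, NF_STOLEN };  /* subset of the real verdicts */

struct sk_buff {
	int already_seen;        /* stands in for the BRNF_NF_BRIDGE_PREROUTING test */
	const char *data;
};

static int deliver(struct sk_buff *skb)          /* plays the role of okfn() */
{
	printf("okfn: continuing with %s\n", skb->data);
	return 0;
}

/* Hook in the spirit of ip_sabotage_in(): if this packet should bypass the
 * rest of the chain, hand it on ourselves and report it as stolen. */
static enum verdict toy_sabotage_hook(struct sk_buff *skb,
				      int (*okfn)(struct sk_buff *))
{
	if (!skb->already_seen) {
		okfn(skb);           /* call the continuation directly ... */
		return NF_STOLEN;    /* ... and tell the core the skb is gone */
	}
	return NF_ACCEPT;            /* otherwise normal traversal continues */
}

int main(void)
{
	struct sk_buff skb = { 0, "bridged packet" };
	printf("verdict: %d\n", toy_sabotage_hook(&skb, deliver));
	return 0;
}

Reusing NF_STOLEN rather than keeping a new verdict also lets NF_MAX_VERDICT shrink back to NF_REPEAT, which is exactly what the first hunk of this commit restores.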
@@ -349,8 +349,6 @@ static unsigned int nf_iterate(struct list_head *head,
 			       int (*okfn)(struct sk_buff *),
 			       int hook_thresh)
 {
-	unsigned int verdict;
-
 	/*
 	 * The caller must not block between calls to this
 	 * function because of risk of continuing from deleted element.
@@ -363,18 +361,28 @@ static unsigned int nf_iterate(struct list_head *head,
 		/* Optimization: we don't need to hold module
 		   reference here, since function can't sleep. --RR */
-		verdict = elem->hook(hook, skb, indev, outdev, okfn);
-		if (verdict != NF_ACCEPT) {
+		switch (elem->hook(hook, skb, indev, outdev, okfn)) {
+		case NF_QUEUE:
+			return NF_QUEUE;
+
+		case NF_STOLEN:
+			return NF_STOLEN;
+
+		case NF_DROP:
+			return NF_DROP;
+
+		case NF_REPEAT:
+			*i = (*i)->prev;
+			break;
+
 #ifdef CONFIG_NETFILTER_DEBUG
-			if (unlikely(verdict > NF_MAX_VERDICT)) {
-				NFDEBUG("Evil return from %p(%u).\n",
-					elem->hook, hook);
-				continue;
-			}
+		case NF_ACCEPT:
+			break;
+
+		default:
+			NFDEBUG("Evil return from %p(%u).\n",
+				elem->hook, hook);
 #endif
-			if (verdict != NF_REPEAT)
-				return verdict;
-			*i = (*i)->prev;
 		}
 	}
 	return NF_ACCEPT;
 }
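The restored nf_iterate() above maps each hook's verdict directly onto control flow: NF_DROP, NF_STOLEN and NF_QUEUE end the walk immediately, NF_REPEAT rewinds the iterator so the same hook runs again, and NF_ACCEPT moves on to the next hook. The following user-space sketch mirrors that dispatch over a plain array instead of the kernel's RCU-protected list; toy_iterate and the sample hooks are illustrative only.

#include <stdio.h>

enum verdict { NF_DROP, NF_ACCEPT, NF_STOLEN, NF_QUEUE, NF_REPEAT };

struct sk_buff { int tries; };

typedef enum verdict (*hook_fn)(struct sk_buff *skb);

/* Verdict-driven traversal in the spirit of the restored nf_iterate():
 * terminating verdicts end the walk, NF_REPEAT re-runs the current hook,
 * NF_ACCEPT advances to the next one. */
static enum verdict toy_iterate(hook_fn *hooks, int n, struct sk_buff *skb)
{
	for (int i = 0; i < n; i++) {
		switch (hooks[i](skb)) {
		case NF_QUEUE:
			return NF_QUEUE;
		case NF_STOLEN:
			return NF_STOLEN;
		case NF_DROP:
			return NF_DROP;
		case NF_REPEAT:
			i--;            /* run the same hook again */
			break;
		case NF_ACCEPT:
			break;          /* fall through to the next hook */
		}
	}
	return NF_ACCEPT;
}

/* Example hook: ask for one repeat, then accept. */
static enum verdict repeat_once(struct sk_buff *skb)
{
	return skb->tries++ ? NF_ACCEPT : NF_REPEAT;
}

static enum verdict always_accept(struct sk_buff *skb)
{
	(void)skb;
	return NF_ACCEPT;
}

int main(void)
{
	hook_fn hooks[] = { repeat_once, always_accept };
	struct sk_buff skb = { 0 };
	printf("final verdict: %d\n", toy_iterate(hooks, 2, &skb));
	return 0;
}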
@@ -486,9 +494,7 @@ static int nf_queue(struct sk_buff *skb,
 	return 1;
 }
 
-/* Returns 1 if okfn() needs to be executed by the caller,
- * -EPERM for NF_DROP, 0 otherwise. */
-int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
+int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
 		 struct net_device *indev,
 		 struct net_device *outdev,
 		 int (*okfn)(struct sk_buff *),
@@ -502,29 +508,34 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
 	rcu_read_lock();
 
 #ifdef CONFIG_NETFILTER_DEBUG
-	if (unlikely((*pskb)->nf_debug & (1 << hook))) {
+	if (skb->nf_debug & (1 << hook)) {
 		printk("nf_hook: hook %i already set.\n", hook);
-		nf_dump_skb(pf, *pskb);
+		nf_dump_skb(pf, skb);
 	}
-	(*pskb)->nf_debug |= (1 << hook);
+	skb->nf_debug |= (1 << hook);
 #endif
 
 	elem = &nf_hooks[pf][hook];
 next_hook:
-	verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
+	verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
 			     outdev, &elem, okfn, hook_thresh);
-	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
-		ret = 1;
-		goto unlock;
-	} else if (verdict == NF_DROP) {
-		kfree_skb(*pskb);
-		ret = -EPERM;
-	} else if (verdict == NF_QUEUE) {
+	if (verdict == NF_QUEUE) {
 		NFDEBUG("nf_hook: Verdict = QUEUE.\n");
-		if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn))
+		if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn))
 			goto next_hook;
 	}
-unlock:
+
+	switch (verdict) {
+	case NF_ACCEPT:
+		ret = okfn(skb);
+		break;
+
+	case NF_DROP:
+		kfree_skb(skb);
+		ret = -EPERM;
+		break;
+	}
+
 	rcu_read_unlock();
 	return ret;
 }
...
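In the restored nf_hook_slow() the caller no longer interprets a return value of 1: on NF_ACCEPT the function calls okfn() itself, on NF_DROP it frees the skb and returns -EPERM, and on NF_QUEUE it loops back to continue traversal if queueing fails. A condensed user-space sketch of that final verdict handling follows (toy names, no RCU and no queue handler).

#include <stdio.h>

enum verdict { NF_DROP, NF_ACCEPT, NF_STOLEN, NF_QUEUE, NF_REPEAT };

struct sk_buff { const char *data; int freed; };

static int deliver(struct sk_buff *skb)          /* plays the role of okfn() */
{
	printf("okfn: delivering %s\n", skb->data);
	return 0;
}

static void toy_kfree_skb(struct sk_buff *skb)   /* stands in for kfree_skb() */
{
	skb->freed = 1;
}

/* Condensed analogue of the restored nf_hook_slow(): the hook traversal is
 * stubbed out as a fixed verdict, and the function acts on that verdict on
 * behalf of the caller instead of asking the caller to run okfn(). */
static int toy_hook_slow(struct sk_buff *skb, enum verdict v,
			 int (*okfn)(struct sk_buff *))
{
	int ret = 0;

	switch (v) {
	case NF_ACCEPT:
		ret = okfn(skb);      /* accepted: deliver the packet here */
		break;
	case NF_DROP:
		toy_kfree_skb(skb);   /* dropped: free it ... */
		ret = -1;             /* ... and report an error (-EPERM in the kernel) */
		break;
	default:
		break;                /* NF_STOLEN/NF_QUEUE: someone else owns the skb */
	}
	return ret;
}

int main(void)
{
	struct sk_buff a = { "accepted packet", 0 };
	struct sk_buff d = { "doomed packet", 0 };
	int ret;

	printf("accept -> %d\n", toy_hook_slow(&a, NF_ACCEPT, deliver));

	ret = toy_hook_slow(&d, NF_DROP, deliver);
	printf("drop   -> %d (freed=%d)\n", ret, d.freed);
	return 0;
}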