Commit ec43f55f authored by Linus Torvalds

Undo the netfilter undo - the next patch fixes the regression it caused.

Cset exclude: torvalds@ppc970.osdl.org|ChangeSet|20050311080305|44342
parent e8a54287
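What this revert brings back, in short: nf_hook_slow() again takes a struct sk_buff ** and reports back to the NF_HOOK macro whether the caller's okfn() should run (return value 1), so a hook such as the bridge sabotage functions can return the restored NF_STOP verdict instead of calling okfn() itself and reporting the packet as NF_STOLEN. The hunks below restore the verdict definition, the NF_HOOK/NF_HOOK_THRESH macros, the two br_netfilter hooks, and the nf_iterate()/nf_hook_slow() core accordingly.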
@@ -18,7 +18,8 @@
 #define NF_STOLEN 2
 #define NF_QUEUE 3
 #define NF_REPEAT 4
-#define NF_MAX_VERDICT NF_REPEAT
+#define NF_STOP 5
+#define NF_MAX_VERDICT NF_STOP
 
 /* Generic cache responses from hook functions.
    <= 0x2000 is used for protocol-flags. */
@@ -138,21 +139,32 @@ void nf_log_packet(int pf,
 /* This is gross, but inline doesn't cut it for avoiding the function
    call in fast path: gcc doesn't inline (needs value tracking?). --RR */
 #ifdef CONFIG_NETFILTER_DEBUG
-#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
-	nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN)
-#define NF_HOOK_THRESH nf_hook_slow
+#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
+({int __ret; \
+if ((__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, INT_MIN)) == 1) \
+	__ret = (okfn)(skb); \
+__ret;})
+#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
+({int __ret; \
+if ((__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1) \
+	__ret = (okfn)(skb); \
+__ret;})
 #else
-#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
-	(list_empty(&nf_hooks[(pf)][(hook)]) \
-	 ? (okfn)(skb) \
-	 : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), INT_MIN))
-#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
-	(list_empty(&nf_hooks[(pf)][(hook)]) \
-	 ? (okfn)(skb) \
-	 : nf_hook_slow((pf), (hook), (skb), (indev), (outdev), (okfn), (thresh)))
+#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
+({int __ret; \
+if (list_empty(&nf_hooks[pf][hook]) || \
+    (__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, INT_MIN)) == 1) \
+	__ret = (okfn)(skb); \
+__ret;})
+#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
+({int __ret; \
+if (list_empty(&nf_hooks[pf][hook]) || \
+    (__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1) \
+	__ret = (okfn)(skb); \
+__ret;})
 #endif
 
-int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
 		 struct net_device *indev, struct net_device *outdev,
 		 int (*okfn)(struct sk_buff *), int thresh);
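As a hedged usage sketch of the restored NF_HOOK contract (not part of this patch; my_rcv and my_rcv_finish are hypothetical names standing in for a protocol receive path and its continuation function):

#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

/* Hypothetical continuation: runs only if the hooks let the packet pass. */
static int my_rcv_finish(struct sk_buff *skb)
{
	return 0;
}

static int my_rcv(struct sk_buff *skb, struct net_device *dev)
{
	/* NF_HOOK expands to a statement expression that calls
	 * nf_hook_slow(pf, hook, &skb, ...); when that returns 1
	 * (verdict NF_ACCEPT or NF_STOP), the macro itself invokes
	 * my_rcv_finish(skb).  NF_DROP surfaces here as -EPERM,
	 * stolen or queued packets as 0. */
	return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
		       my_rcv_finish);
}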
...
@@ -829,8 +829,7 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff **pskb,
 {
 	if ((*pskb)->nf_bridge &&
 	    !((*pskb)->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
-		okfn(*pskb);
-		return NF_STOLEN;
+		return NF_STOP;
 	}
 
 	return NF_ACCEPT;
@@ -891,8 +890,7 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
 		if (out->priv_flags & IFF_802_1Q_VLAN)
 			nf_bridge->netoutdev = (struct net_device *)out;
 #endif
-		okfn(skb);
-		return NF_STOLEN;
+		return NF_STOP;
 	}
 
 	return NF_ACCEPT;
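The two bridge hooks above show the pattern this patch restores: instead of delivering the skb by hand and reporting it stolen, a hook returns NF_STOP and lets the core hand control back to the NF_HOOK caller. A hedged sketch, with a hypothetical hook and predicate (not from this commit):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>

/* Hypothetical predicate, for illustration only. */
static int wants_early_delivery(const struct sk_buff *skb)
{
	return 0;
}

static unsigned int example_hook(unsigned int hooknum, struct sk_buff **pskb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	if (wants_early_delivery(*pskb)) {
		/* Old style removed above:
		 *	okfn(*pskb);
		 *	return NF_STOLEN;
		 * Restored style: stop traversing hooks; nf_hook_slow()
		 * then returns 1 so the NF_HOOK caller runs okfn() once. */
		return NF_STOP;
	}
	return NF_ACCEPT;
}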
...
@@ -349,6 +349,8 @@ static unsigned int nf_iterate(struct list_head *head,
 			       int (*okfn)(struct sk_buff *),
 			       int hook_thresh)
 {
+	unsigned int verdict;
+
 	/*
 	 * The caller must not block between calls to this
 	 * function because of risk of continuing from deleted element.
@@ -361,28 +363,18 @@ static unsigned int nf_iterate(struct list_head *head,
 		/* Optimization: we don't need to hold module
 		   reference here, since function can't sleep. --RR */
-		switch (elem->hook(hook, skb, indev, outdev, okfn)) {
-		case NF_QUEUE:
-			return NF_QUEUE;
-
-		case NF_STOLEN:
-			return NF_STOLEN;
-
-		case NF_DROP:
-			return NF_DROP;
-
-		case NF_REPEAT:
-			*i = (*i)->prev;
-			break;
-
+		verdict = elem->hook(hook, skb, indev, outdev, okfn);
+		if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
-		case NF_ACCEPT:
-			break;
-
-		default:
-			NFDEBUG("Evil return from %p(%u).\n",
-				elem->hook, hook);
+			if (unlikely(verdict > NF_MAX_VERDICT)) {
+				NFDEBUG("Evil return from %p(%u).\n",
+					elem->hook, hook);
+				continue;
+			}
 #endif
+			if (verdict != NF_REPEAT)
+				return verdict;
+			*i = (*i)->prev;
 		}
 	}
 	return NF_ACCEPT;
@@ -494,7 +486,9 @@ static int nf_queue(struct sk_buff *skb,
 	return 1;
 }
 
-int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
+/* Returns 1 if okfn() needs to be executed by the caller,
+ * -EPERM for NF_DROP, 0 otherwise. */
+int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
 		 struct net_device *indev,
 		 struct net_device *outdev,
 		 int (*okfn)(struct sk_buff *),
@@ -508,34 +502,29 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
 	rcu_read_lock();
 
 #ifdef CONFIG_NETFILTER_DEBUG
-	if (skb->nf_debug & (1 << hook)) {
+	if (unlikely((*pskb)->nf_debug & (1 << hook))) {
 		printk("nf_hook: hook %i already set.\n", hook);
-		nf_dump_skb(pf, skb);
+		nf_dump_skb(pf, *pskb);
 	}
-	skb->nf_debug |= (1 << hook);
+	(*pskb)->nf_debug |= (1 << hook);
 #endif
 
 	elem = &nf_hooks[pf][hook];
 next_hook:
-	verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
+	verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
 			     outdev, &elem, okfn, hook_thresh);
-	if (verdict == NF_QUEUE) {
+	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
+		ret = 1;
+		goto unlock;
+	} else if (verdict == NF_DROP) {
+		kfree_skb(*pskb);
+		ret = -EPERM;
+	} else if (verdict == NF_QUEUE) {
 		NFDEBUG("nf_hook: Verdict = QUEUE.\n");
-		if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn))
+		if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn))
 			goto next_hook;
 	}
-
-	switch (verdict) {
-	case NF_ACCEPT:
-		ret = okfn(skb);
-		break;
-
-	case NF_DROP:
-		kfree_skb(skb);
-		ret = -EPERM;
-		break;
-	}
-
+unlock:
 	rcu_read_unlock();
 	return ret;
 }
...