Commit c41a3ca5 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6

into home.osdl.org:/home/torvalds/v2.5/linux
parents 7300a7e9 0bbe34be
@@ -678,4 +678,23 @@ IPv6 Update by:
Pekka Savola <pekkas@netcore.fi>
YOSHIFUJI Hideaki / USAGI Project <yoshfuji@linux-ipv6.org>
/proc/sys/net/bridge/* Variables:
bridge-nf-call-arptables - BOOLEAN
1 : pass bridged ARP traffic to arptables' FORWARD chain.
0 : disable this.
Default: 1
bridge-nf-call-iptables - BOOLEAN
1 : pass bridged IPv4 traffic to iptables' chains.
0 : disable this.
Default: 1
bridge-nf-filter-vlan-tagged - BOOLEAN
1 : pass bridged vlan-tagged ARP/IP traffic to arptables/iptables.
0 : disable this.
Default: 1
$Id: ip-sysctl.txt,v 1.20 2001/12/13 09:00:18 davem Exp $
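The three bridge-nf switches documented above appear under /proc/sys/net/bridge/ once the sysctl table added later in this commit is registered. As a minimal userspace illustration (not part of the patch; it assumes the file exists and the caller is allowed to write it), bridged IPv4 filtering can be switched off like this:

/* Illustrative only: turn off bridge-nf-call-iptables from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/sys/net/bridge/bridge-nf-call-iptables", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "0", 1) != 1)     /* writing "1" (the default) re-enables it */
                perror("write");
        close(fd);
        return 0;
}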
...@@ -579,6 +579,14 @@ enum { ...@@ -579,6 +579,14 @@ enum {
NET_SCTP_MAX_BURST = 12, NET_SCTP_MAX_BURST = 12,
}; };
/* /proc/sys/net/bridge */
enum {
NET_BRIDGE_NF_CALL_ARPTABLES = 1,
NET_BRIDGE_NF_CALL_IPTABLES = 2,
NET_BRIDGE_NF_CALL_IP6TABLES = 3,
NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
};
/* CTL_PROC names: */ /* CTL_PROC names: */
/* CTL_FS names: */ /* CTL_FS names: */
......
...@@ -227,8 +227,7 @@ extern void ax25_cb_add(ax25_cb *); ...@@ -227,8 +227,7 @@ extern void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int); struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int); struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *); extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern struct sock *ax25_addr_match(ax25_address *);
-extern void ax25_send_to_raw(struct sock *, struct sk_buff *, int);
+extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
extern void ax25_destroy_socket(ax25_cb *); extern void ax25_destroy_socket(ax25_cb *);
extern ax25_cb *ax25_create_cb(void); extern ax25_cb *ax25_create_cb(void);
extern void ax25_fillin_cb(ax25_cb *, ax25_dev *); extern void ax25_fillin_cb(ax25_cb *, ax25_dev *);
......
...@@ -81,7 +81,12 @@ static inline int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct ...@@ -81,7 +81,12 @@ static inline int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct
return -1; return -1;
} }
static inline void tcf_destroy(struct tcf_proto *tp)
{
tp->ops->destroy(tp);
module_put(tp->ops->owner);
kfree(tp);
}
extern int register_tcf_proto_ops(struct tcf_proto_ops *ops); extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
......
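The tcf_destroy() helper added above is what the packet-scheduler hunks later in this commit switch their filter teardown to. A sketch of that shared pattern (the function name here is hypothetical; it mirrors loops such as htb_destroy_filters further down):

/* Sketch: unlink each classifier from the list and let tcf_destroy()
 * call its ->destroy(), drop the module reference and free the tcf_proto. */
static void example_destroy_filters(struct tcf_proto **fl)
{
        struct tcf_proto *tp;

        while ((tp = *fl) != NULL) {
                *fl = tp->next;
                tcf_destroy(tp);
        }
}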
@@ -228,45 +228,25 @@ ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
	return NULL;
}

-/*
- *	Look for any matching address - RAW sockets can bind to arbitrary names
- */
-struct sock *ax25_addr_match(ax25_address *addr)
+void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
{
-	struct sock *sk = NULL;
	ax25_cb *s;
+	struct sk_buff *copy;
	struct hlist_node *node;

	spin_lock_bh(&ax25_list_lock);
	ax25_for_each(s, node, &ax25_list) {
		if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
-		    s->sk->sk_type == SOCK_RAW) {
-			sk = s->sk;
-			lock_sock(sk);
-			break;
-		}
-	}
-	spin_unlock_bh(&ax25_list_lock);
-
-	return sk;
-}
-
-void ax25_send_to_raw(struct sock *sk, struct sk_buff *skb, int proto)
-{
-	struct sk_buff *copy;
-	struct hlist_node *node;
-
-	sk_for_each_from(sk, node)
-		if (sk->sk_type == SOCK_RAW &&
-		    sk->sk_protocol == proto &&
-		    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
+		    s->sk->sk_type == SOCK_RAW &&
+		    s->sk->sk_protocol == proto &&
+		    s->ax25_dev->dev == skb->dev &&
+		    atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
			if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
-				return;
-
-			if (sock_queue_rcv_skb(sk, copy) != 0)
+				continue;
+			if (sock_queue_rcv_skb(s->sk, copy) != 0)
				kfree_skb(copy);
		}
+	}
+	spin_unlock_bh(&ax25_list_lock);
}
/* /*
...@@ -318,7 +298,7 @@ void ax25_destroy_socket(ax25_cb *ax25) ...@@ -318,7 +298,7 @@ void ax25_destroy_socket(ax25_cb *ax25)
ax25_cb *sax25 = ax25_sk(skb->sk); ax25_cb *sax25 = ax25_sk(skb->sk);
/* Queue the unaccepted socket for death */ /* Queue the unaccepted socket for death */
-	sock_set_flag(skb->sk, SOCK_DEAD);
+	sock_orphan(skb->sk);
ax25_start_heartbeat(sax25); ax25_start_heartbeat(sax25);
sax25->state = AX25_STATE_0; sax25->state = AX25_STATE_0;
...@@ -913,6 +893,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) ...@@ -913,6 +893,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
if (oax25->digipeat != NULL) { if (oax25->digipeat != NULL) {
if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
sk_free(sk); sk_free(sk);
ax25_cb_put(ax25);
return NULL; return NULL;
} }
...@@ -934,20 +915,25 @@ static int ax25_release(struct socket *sock) ...@@ -934,20 +915,25 @@ static int ax25_release(struct socket *sock)
return 0; return 0;
sock_hold(sk); sock_hold(sk);
sock_orphan(sk);
lock_sock(sk); lock_sock(sk);
ax25 = ax25_sk(sk); ax25 = ax25_sk(sk);
if (sk->sk_type == SOCK_SEQPACKET) { if (sk->sk_type == SOCK_SEQPACKET) {
switch (ax25->state) { switch (ax25->state) {
case AX25_STATE_0: case AX25_STATE_0:
release_sock(sk);
ax25_disconnect(ax25, 0); ax25_disconnect(ax25, 0);
lock_sock(sk);
ax25_destroy_socket(ax25); ax25_destroy_socket(ax25);
break; break;
case AX25_STATE_1: case AX25_STATE_1:
case AX25_STATE_2: case AX25_STATE_2:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
release_sock(sk);
ax25_disconnect(ax25, 0); ax25_disconnect(ax25, 0);
lock_sock(sk);
ax25_destroy_socket(ax25); ax25_destroy_socket(ax25);
break; break;
...@@ -980,7 +966,6 @@ static int ax25_release(struct socket *sock) ...@@ -980,7 +966,6 @@ static int ax25_release(struct socket *sock)
sk->sk_state = TCP_CLOSE; sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk); sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY); sock_set_flag(sk, SOCK_DESTROY);
break; break;
...@@ -991,12 +976,10 @@ static int ax25_release(struct socket *sock) ...@@ -991,12 +976,10 @@ static int ax25_release(struct socket *sock)
sk->sk_state = TCP_CLOSE; sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk); sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
ax25_destroy_socket(ax25); ax25_destroy_socket(ax25);
} }
sock->sk = NULL; sock->sk = NULL;
sk->sk_socket = NULL; /* Not used, but we should do this */
release_sock(sk); release_sock(sk);
sock_put(sk); sock_put(sk);
...@@ -1334,11 +1317,13 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) ...@@ -1334,11 +1317,13 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
release_sock(sk); release_sock(sk);
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
-		if (flags & O_NONBLOCK)
+		if (flags & O_NONBLOCK) {
current->state = TASK_RUNNING;
remove_wait_queue(sk->sk_sleep, &wait);
return -EWOULDBLOCK; return -EWOULDBLOCK;
}
if (!signal_pending(tsk)) { if (!signal_pending(tsk)) {
schedule(); schedule();
current->state = TASK_RUNNING;
lock_sock(sk); lock_sock(sk);
continue; continue;
} }
......
...@@ -147,7 +147,6 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) ...@@ -147,7 +147,6 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
} }
if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) { if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
bh_lock_sock(ax25->sk);
if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) || if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
ax25->pidincl) { ax25->pidincl) {
if (sock_queue_rcv_skb(ax25->sk, skb) == 0) if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
...@@ -155,7 +154,6 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) ...@@ -155,7 +154,6 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
else else
ax25->condition |= AX25_COND_OWN_RX_BUSY; ax25->condition |= AX25_COND_OWN_RX_BUSY;
} }
bh_unlock_sock(ax25->sk);
} }
return queued; return queued;
...@@ -195,7 +193,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ...@@ -195,7 +193,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
{ {
ax25_address src, dest, *next_digi = NULL; ax25_address src, dest, *next_digi = NULL;
int type = 0, mine = 0, dama; int type = 0, mine = 0, dama;
-	struct sock *make, *sk, *raw;
+	struct sock *make, *sk;
ax25_digi dp, reverse_dp; ax25_digi dp, reverse_dp;
ax25_cb *ax25; ax25_cb *ax25;
ax25_dev *ax25_dev; ax25_dev *ax25_dev;
...@@ -243,10 +241,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ...@@ -243,10 +241,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) { if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
skb->h.raw = skb->data + 2; /* skip control and pid */ skb->h.raw = skb->data + 2; /* skip control and pid */
-		if ((raw = ax25_addr_match(&dest)) != NULL) {
-			ax25_send_to_raw(raw, skb, skb->data[1]);
-			release_sock(raw);
-		}
+		ax25_send_to_raw(&dest, skb, skb->data[1]);
if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) { if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) {
kfree_skb(skb); kfree_skb(skb);
...@@ -381,7 +376,6 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ...@@ -381,7 +376,6 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
sk->sk_ack_backlog++; sk->sk_ack_backlog++;
bh_unlock_sock(sk); bh_unlock_sock(sk);
sock_put(sk);
} else { } else {
if (!mine) { if (!mine) {
kfree_skb(skb); kfree_skb(skb);
...@@ -407,6 +401,8 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ...@@ -407,6 +401,8 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
(ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
kfree_skb(skb); kfree_skb(skb);
ax25_destroy_socket(ax25); ax25_destroy_socket(ax25);
if (sk)
sock_put(sk);
return 0; return 0;
} }
...@@ -446,6 +442,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ...@@ -446,6 +442,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
if (sk) { if (sk) {
if (!sock_flag(sk, SOCK_DEAD)) if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len); sk->sk_data_ready(sk, skb->len);
sock_put(sk);
} else } else
kfree_skb(skb); kfree_skb(skb);
......
...@@ -360,3 +360,4 @@ module_exit(bt_cleanup); ...@@ -360,3 +360,4 @@ module_exit(bt_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>"); MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION); MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
...@@ -707,4 +707,3 @@ module_exit(bnep_cleanup_module); ...@@ -707,4 +707,3 @@ module_exit(bnep_cleanup_module);
MODULE_DESCRIPTION("Bluetooth BNEP ver " VERSION); MODULE_DESCRIPTION("Bluetooth BNEP ver " VERSION);
MODULE_AUTHOR("David Libault <david.libault@inventel.fr>, Maxim Krasnyanskiy <maxk@qualcomm.com>"); MODULE_AUTHOR("David Libault <david.libault@inventel.fr>, Maxim Krasnyanskiy <maxk@qualcomm.com>");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
...@@ -35,6 +35,9 @@ ...@@ -35,6 +35,9 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/checksum.h> #include <asm/checksum.h>
#include "br_private.h" #include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#define skb_origaddr(skb) (((struct bridge_skb_cb *) \ #define skb_origaddr(skb) (((struct bridge_skb_cb *) \
...@@ -47,10 +50,21 @@ ...@@ -47,10 +50,21 @@
#define has_bridge_parent(device) ((device)->br_port != NULL) #define has_bridge_parent(device) ((device)->br_port != NULL)
#define bridge_parent(device) ((device)->br_port->br->dev) #define bridge_parent(device) ((device)->br_port->br->dev)
-#define IS_VLAN_IP (skb->protocol == __constant_htons(ETH_P_8021Q) && \
-	hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP))
-#define IS_VLAN_ARP (skb->protocol == __constant_htons(ETH_P_8021Q) && \
-	hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_ARP))
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *brnf_sysctl_header;
+static int brnf_call_iptables = 1;
+static int brnf_call_arptables = 1;
+static int brnf_filter_vlan_tagged = 1;
+#else
+#define brnf_filter_vlan_tagged 1
+#endif
+
+#define IS_VLAN_IP (skb->protocol == __constant_htons(ETH_P_8021Q) && \
+	hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP) && \
+	brnf_filter_vlan_tagged)
+#define IS_VLAN_ARP (skb->protocol == __constant_htons(ETH_P_8021Q) && \
+	hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_ARP) && \
+	brnf_filter_vlan_tagged)
/* We need these fake structures to make netfilter happy -- /* We need these fake structures to make netfilter happy --
* lots of places assume that skb->dst != NULL, which isn't * lots of places assume that skb->dst != NULL, which isn't
...@@ -74,8 +88,7 @@ static struct rtable __fake_rtable = { ...@@ -74,8 +88,7 @@ static struct rtable __fake_rtable = {
.metrics = {[RTAX_MTU - 1] = 1500}, .metrics = {[RTAX_MTU - 1] = 1500},
} }
}, },
-	.rt_flags	= 0,
+	.rt_flags	= 0
}; };
...@@ -251,6 +264,11 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, ...@@ -251,6 +264,11 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
struct sk_buff *skb = *pskb; struct sk_buff *skb = *pskb;
struct nf_bridge_info *nf_bridge; struct nf_bridge_info *nf_bridge;
#ifdef CONFIG_SYSCTL
if (!brnf_call_iptables)
return NF_ACCEPT;
#endif
if (skb->protocol != __constant_htons(ETH_P_IP)) { if (skb->protocol != __constant_htons(ETH_P_IP)) {
struct vlan_ethhdr *hdr = (struct vlan_ethhdr *) struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)
((*pskb)->mac.ethernet); ((*pskb)->mac.ethernet);
...@@ -373,7 +391,7 @@ static int br_nf_forward_finish(struct sk_buff *skb) ...@@ -373,7 +391,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
* because of the ipt_physdev.c module. For ARP, indev and outdev are the * because of the ipt_physdev.c module. For ARP, indev and outdev are the
* bridge ports. * bridge ports.
*/ */
-static unsigned int br_nf_forward(unsigned int hook, struct sk_buff **pskb,
+static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
const struct net_device *in, const struct net_device *out, const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *)) int (*okfn)(struct sk_buff *))
{ {
...@@ -381,9 +399,13 @@ static unsigned int br_nf_forward(unsigned int hook, struct sk_buff **pskb, ...@@ -381,9 +399,13 @@ static unsigned int br_nf_forward(unsigned int hook, struct sk_buff **pskb,
struct nf_bridge_info *nf_bridge; struct nf_bridge_info *nf_bridge;
struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet); struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet);
-	if (skb->protocol != __constant_htons(ETH_P_IP) &&
-	    skb->protocol != __constant_htons(ETH_P_ARP)) {
-		if (!IS_VLAN_IP && !IS_VLAN_ARP)
+#ifdef CONFIG_SYSCTL
+	if (!skb->nf_bridge)
+		return NF_ACCEPT;
+#endif
+
+	if (skb->protocol != __constant_htons(ETH_P_IP)) {
+		if (!IS_VLAN_IP)
return NF_ACCEPT; return NF_ACCEPT;
skb_pull(*pskb, VLAN_HLEN); skb_pull(*pskb, VLAN_HLEN);
(*pskb)->nh.raw += VLAN_HLEN; (*pskb)->nh.raw += VLAN_HLEN;
@@ -392,39 +414,58 @@ static unsigned int br_nf_forward(unsigned int hook, struct sk_buff **pskb,
#ifdef CONFIG_NETFILTER_DEBUG
	skb->nf_debug ^= (1 << NF_BR_FORWARD);
#endif
-	if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP) {
-		nf_bridge = skb->nf_bridge;
-		if (skb->pkt_type == PACKET_OTHERHOST) {
-			skb->pkt_type = PACKET_HOST;
-			nf_bridge->mask |= BRNF_PKT_TYPE;
-		}
+	nf_bridge = skb->nf_bridge;
+	if (skb->pkt_type == PACKET_OTHERHOST) {
+		skb->pkt_type = PACKET_HOST;
+		nf_bridge->mask |= BRNF_PKT_TYPE;
+	}

	/* The physdev module checks on this */
	nf_bridge->mask |= BRNF_BRIDGED;
	nf_bridge->physoutdev = skb->dev;

	NF_HOOK(PF_INET, NF_IP_FORWARD, skb, bridge_parent(in),
		bridge_parent(out), br_nf_forward_finish);
-	} else {
-		struct net_device **d = (struct net_device **)(skb->cb);
-		struct arphdr *arp = skb->nh.arph;
-
-		if (arp->ar_pln != 4) {
-			if (IS_VLAN_ARP) {
-				skb_push(*pskb, VLAN_HLEN);
-				(*pskb)->nh.raw -= VLAN_HLEN;
-			}
-			return NF_ACCEPT;
-		}
-		*d = (struct net_device *)in;
-		NF_HOOK(NF_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
-			(struct net_device *)out, br_nf_forward_finish);
-	}
+
+	return NF_STOLEN;
+}
+
+static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb,
+   const struct net_device *in, const struct net_device *out,
+   int (*okfn)(struct sk_buff *))
+{
+	struct sk_buff *skb = *pskb;
+	struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet);
+	struct net_device **d = (struct net_device **)(skb->cb);
+
+	if (!brnf_call_arptables)
+		return NF_ACCEPT;
+
+	if (skb->protocol != __constant_htons(ETH_P_ARP)) {
+		if (!IS_VLAN_ARP)
+			return NF_ACCEPT;
+		skb_pull(*pskb, VLAN_HLEN);
+		(*pskb)->nh.raw += VLAN_HLEN;
+	}
+
+#ifdef CONFIG_NETFILTER_DEBUG
+	skb->nf_debug ^= (1 << NF_BR_FORWARD);
+#endif
+
+	if (skb->nh.arph->ar_pln != 4) {
+		if (IS_VLAN_ARP) {
+			skb_push(*pskb, VLAN_HLEN);
+			(*pskb)->nh.raw -= VLAN_HLEN;
+		}
+		return NF_ACCEPT;
+	}
+	*d = (struct net_device *)in;
+	NF_HOOK(NF_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
+		(struct net_device *)out, br_nf_forward_finish);

	return NF_STOLEN;
}
/* PF_BRIDGE/LOCAL_OUT ***********************************************/ /* PF_BRIDGE/LOCAL_OUT ***********************************************/
static int br_nf_local_out_finish(struct sk_buff *skb) static int br_nf_local_out_finish(struct sk_buff *skb)
{ {
...@@ -475,6 +516,11 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, ...@@ -475,6 +516,11 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
struct nf_bridge_info *nf_bridge; struct nf_bridge_info *nf_bridge;
struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet); struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)(skb->mac.ethernet);
#ifdef CONFIG_SYSCTL
if (!skb->nf_bridge)
return NF_ACCEPT;
#endif
if (skb->protocol != __constant_htons(ETH_P_IP) && !IS_VLAN_IP) if (skb->protocol != __constant_htons(ETH_P_IP) && !IS_VLAN_IP)
return NF_ACCEPT; return NF_ACCEPT;
...@@ -485,6 +531,7 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, ...@@ -485,6 +531,7 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
return NF_ACCEPT; return NF_ACCEPT;
nf_bridge = skb->nf_bridge; nf_bridge = skb->nf_bridge;
nf_bridge->physoutdev = skb->dev; nf_bridge->physoutdev = skb->dev;
realindev = nf_bridge->physindev; realindev = nf_bridge->physindev;
...@@ -567,6 +614,11 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, ...@@ -567,6 +614,11 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
return NF_ACCEPT; return NF_ACCEPT;
} }
#ifdef CONFIG_SYSCTL
if (!nf_bridge)
return NF_ACCEPT;
#endif
if (skb->protocol != __constant_htons(ETH_P_IP) && !IS_VLAN_IP) if (skb->protocol != __constant_htons(ETH_P_IP) && !IS_VLAN_IP)
return NF_ACCEPT; return NF_ACCEPT;
...@@ -632,6 +684,13 @@ static unsigned int ipv4_sabotage_out(unsigned int hook, struct sk_buff **pskb, ...@@ -632,6 +684,13 @@ static unsigned int ipv4_sabotage_out(unsigned int hook, struct sk_buff **pskb,
const struct net_device *in, const struct net_device *out, const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *)) int (*okfn)(struct sk_buff *))
{ {
struct sk_buff *skb = *pskb;
#ifdef CONFIG_SYSCTL
if (!brnf_call_iptables && !skb->nf_bridge)
return NF_ACCEPT;
#endif
if ((out->hard_start_xmit == br_dev_xmit && if ((out->hard_start_xmit == br_dev_xmit &&
okfn != br_nf_forward_finish && okfn != br_nf_forward_finish &&
okfn != br_nf_local_out_finish && okfn != br_nf_local_out_finish &&
...@@ -641,7 +700,6 @@ static unsigned int ipv4_sabotage_out(unsigned int hook, struct sk_buff **pskb, ...@@ -641,7 +700,6 @@ static unsigned int ipv4_sabotage_out(unsigned int hook, struct sk_buff **pskb,
VLAN_DEV_INFO(out)->real_dev->hard_start_xmit == br_dev_xmit) VLAN_DEV_INFO(out)->real_dev->hard_start_xmit == br_dev_xmit)
#endif #endif
) { ) {
struct sk_buff *skb = *pskb;
struct nf_bridge_info *nf_bridge; struct nf_bridge_info *nf_bridge;
if (!skb->nf_bridge && !nf_bridge_alloc(skb)) if (!skb->nf_bridge && !nf_bridge_alloc(skb))
...@@ -687,7 +745,12 @@ static struct nf_hook_ops br_nf_ops[] = { ...@@ -687,7 +745,12 @@ static struct nf_hook_ops br_nf_ops[] = {
.pf = PF_BRIDGE, .pf = PF_BRIDGE,
.hooknum = NF_BR_LOCAL_IN, .hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_BRNF, }, .priority = NF_BR_PRI_BRNF, },
-	{ .hook = br_nf_forward,
+	{ .hook = br_nf_forward_ip,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF - 1, },
{ .hook = br_nf_forward_arp,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.pf = PF_BRIDGE, .pf = PF_BRIDGE,
.hooknum = NF_BR_FORWARD, .hooknum = NF_BR_FORWARD,
...@@ -724,6 +787,69 @@ static struct nf_hook_ops br_nf_ops[] = { ...@@ -724,6 +787,69 @@ static struct nf_hook_ops br_nf_ops[] = {
.priority = NF_IP_PRI_FIRST, }, .priority = NF_IP_PRI_FIRST, },
}; };
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table *ctl, int write, struct file * filp,
void *buffer, size_t *lenp)
{
int ret;
ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *(int *)(ctl->data))
*(int *)(ctl->data) = 1;
return ret;
}
static ctl_table brnf_table[] = {
{
.ctl_name = NET_BRIDGE_NF_CALL_ARPTABLES,
.procname = "bridge-nf-call-arptables",
.data = &brnf_call_arptables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &brnf_sysctl_call_tables,
},
{
.ctl_name = NET_BRIDGE_NF_CALL_IPTABLES,
.procname = "bridge-nf-call-iptables",
.data = &brnf_call_iptables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &brnf_sysctl_call_tables,
},
{
.ctl_name = NET_BRIDGE_NF_FILTER_VLAN_TAGGED,
.procname = "bridge-nf-filter-vlan-tagged",
.data = &brnf_filter_vlan_tagged,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &brnf_sysctl_call_tables,
},
{ .ctl_name = 0 }
};
static ctl_table brnf_bridge_table[] = {
{
.ctl_name = NET_BRIDGE,
.procname = "bridge",
.mode = 0555,
.child = brnf_table,
},
{ .ctl_name = 0 }
};
static ctl_table brnf_net_table[] = {
{
.ctl_name = CTL_NET,
.procname = "net",
.mode = 0555,
.child = brnf_bridge_table,
},
{ .ctl_name = 0 }
};
#endif
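brnf_sysctl_call_tables() above runs proc_dointvec() and then forces any non-zero value back to 1, so the three entries behave as plain booleans. An illustrative userspace check (it assumes the table registered below is present on the running kernel):

/* Illustrative only: a write of "5" is normalized to 1 by the handler above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[16] = "";
        int fd = open("/proc/sys/net/bridge/bridge-nf-filter-vlan-tagged", O_RDWR);

        if (fd < 0)
                return 1;
        write(fd, "5", 1);
        lseek(fd, 0, SEEK_SET);
        read(fd, buf, sizeof(buf) - 1);
        printf("%s\n", buf);            /* prints 1, not 5 */
        close(fd);
        return 0;
}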
int br_netfilter_init(void) int br_netfilter_init(void)
{ {
int i; int i;
...@@ -740,6 +866,16 @@ int br_netfilter_init(void) ...@@ -740,6 +866,16 @@ int br_netfilter_init(void)
return ret; return ret;
} }
#ifdef CONFIG_SYSCTL
brnf_sysctl_header = register_sysctl_table(brnf_net_table, 0);
if (brnf_sysctl_header == NULL) {
printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n");
for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++)
nf_unregister_hook(&br_nf_ops[i]);
return -EFAULT;
}
#endif
printk(KERN_NOTICE "Bridge firewalling registered\n"); printk(KERN_NOTICE "Bridge firewalling registered\n");
return 0; return 0;
...@@ -751,4 +887,7 @@ void br_netfilter_fini(void) ...@@ -751,4 +887,7 @@ void br_netfilter_fini(void)
for (i = ARRAY_SIZE(br_nf_ops) - 1; i >= 0; i--) for (i = ARRAY_SIZE(br_nf_ops) - 1; i >= 0; i--)
nf_unregister_hook(&br_nf_ops[i]); nf_unregister_hook(&br_nf_ops[i]);
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(brnf_sysctl_header);
#endif
} }
...@@ -98,7 +98,7 @@ static int neigh_blackhole(struct sk_buff *skb) ...@@ -98,7 +98,7 @@ static int neigh_blackhole(struct sk_buff *skb)
/* /*
* It is random distribution in the interval (1/2)*base...(3/2)*base. * It is random distribution in the interval (1/2)*base...(3/2)*base.
* It corresponds to default IPv6 settings and is not overridable, * It corresponds to default IPv6 settings and is not overridable,
- * because it is really reasonbale choice.
+ * because it is really reasonable choice.
*/ */
unsigned long neigh_rand_reach_time(unsigned long base) unsigned long neigh_rand_reach_time(unsigned long base)
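The comment above describes a uniform pick in the interval (1/2)*base ... (3/2)*base. A hedged sketch of such a helper (an illustration of the formula only, not necessarily the exact in-tree body; net_random() is the kernel's pseudo-random helper of this era):

/* Illustration: base/2 plus a random offset below base gives a value
 * uniformly distributed in [base/2, 3*base/2). */
unsigned long example_rand_reach_time(unsigned long base)
{
        return base ? (base >> 1) + (net_random() % base) : 0;
}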
...@@ -120,7 +120,7 @@ static int neigh_forced_gc(struct neigh_table *tbl) ...@@ -120,7 +120,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
while ((n = *np) != NULL) { while ((n = *np) != NULL) {
/* Neighbour record may be discarded if: /* Neighbour record may be discarded if:
- nobody refers to it. - nobody refers to it.
-	 - it is not premanent
+	 - it is not permanent
- (NEW and probably wrong) - (NEW and probably wrong)
INCOMPLETE entries are kept at least for INCOMPLETE entries are kept at least for
n->parms->retrans_time, otherwise we could n->parms->retrans_time, otherwise we could
...@@ -510,7 +510,7 @@ static void neigh_suspect(struct neighbour *neigh) ...@@ -510,7 +510,7 @@ static void neigh_suspect(struct neighbour *neigh)
{ {
struct hh_cache *hh; struct hh_cache *hh;
NEIGH_PRINTK2("neigh %p is suspecteded.\n", neigh); NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
neigh->output = neigh->ops->output; neigh->output = neigh->ops->output;
...@@ -537,7 +537,7 @@ static void neigh_connect(struct neighbour *neigh) ...@@ -537,7 +537,7 @@ static void neigh_connect(struct neighbour *neigh)
/* /*
Transitions NUD_STALE <-> NUD_REACHABLE do not occur Transitions NUD_STALE <-> NUD_REACHABLE do not occur
-   when fast path is built: we have no timers assotiated with
+   when fast path is built: we have no timers associated with
these states, we do not have time to check state when sending. these states, we do not have time to check state when sending.
neigh_periodic_timer check periodically neigh->confirmed neigh_periodic_timer check periodically neigh->confirmed
time and moves NUD_REACHABLE -> NUD_STALE. time and moves NUD_REACHABLE -> NUD_STALE.
...@@ -962,7 +962,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, ...@@ -962,7 +962,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
/* This function can be used in contexts, where only old dev_queue_xmit /* This function can be used in contexts, where only old dev_queue_xmit
worked, f.e. if you want to override normal output path (eql, shaper), worked, f.e. if you want to override normal output path (eql, shaper),
-   but resoltution is not made yet.
+   but resolution is not made yet.
*/ */
int neigh_compat_output(struct sk_buff *skb) int neigh_compat_output(struct sk_buff *skb)
......
...@@ -207,9 +207,8 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, ...@@ -207,9 +207,8 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
case ND_OPT_MTU: case ND_OPT_MTU:
case ND_OPT_REDIRECT_HDR: case ND_OPT_REDIRECT_HDR:
if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
-			ND_PRINTK2((KERN_WARNING
-				    "ndisc_parse_options(): duplicated ND6 option found: type=%d\n",
-				    nd_opt->nd_opt_type));
+			ND_PRINTK2("ndisc_parse_options(): duplicated ND6 option found: type=%d\n",
+				   nd_opt->nd_opt_type);
} else { } else {
ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt;
} }
...@@ -619,6 +618,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, ...@@ -619,6 +618,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
1, &err); 1, &err);
if (skb == NULL) { if (skb == NULL) {
ND_PRINTK1("send_ns: alloc skb failed\n"); ND_PRINTK1("send_ns: alloc skb failed\n");
dst_release(dst);
return; return;
} }
...@@ -1166,9 +1166,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) ...@@ -1166,9 +1166,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
ND_PRINTK0("NDISC: router announcement with mtu = %d\n", ND_PRINTK0("NDISC: router announcement with mtu = %d\n",
mtu); mtu);
} }
-			}
-
-			if (in6_dev->cnf.mtu6 != mtu) {
+			} else if (in6_dev->cnf.mtu6 != mtu) {
in6_dev->cnf.mtu6 = mtu; in6_dev->cnf.mtu6 = mtu;
if (rt) if (rt)
......
...@@ -532,7 +532,7 @@ static int nr_release(struct socket *sock) ...@@ -532,7 +532,7 @@ static int nr_release(struct socket *sock)
sk->sk_state = TCP_CLOSE; sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk); sk->sk_state_change(sk);
-		sock_set_flag(sk, SOCK_DEAD);
+		sock_orphan(sk);
sock_set_flag(sk, SOCK_DESTROY); sock_set_flag(sk, SOCK_DESTROY);
sk->sk_socket = NULL; sk->sk_socket = NULL;
break; break;
...@@ -727,6 +727,8 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr, ...@@ -727,6 +727,8 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
lock_sock(sk); lock_sock(sk);
continue; continue;
} }
current->state = TASK_RUNNING;
remove_wait_queue(sk->sk_sleep, &wait);
return -ERESTARTSYS; return -ERESTARTSYS;
} }
current->state = TASK_RUNNING; current->state = TASK_RUNNING;
...@@ -780,13 +782,18 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags) ...@@ -780,13 +782,18 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
release_sock(sk); release_sock(sk);
-		if (flags & O_NONBLOCK)
+		if (flags & O_NONBLOCK) {
current->state = TASK_RUNNING;
remove_wait_queue(sk->sk_sleep, &wait);
return -EWOULDBLOCK; return -EWOULDBLOCK;
}
if (!signal_pending(tsk)) { if (!signal_pending(tsk)) {
schedule(); schedule();
lock_sock(sk); lock_sock(sk);
continue; continue;
} }
current->state = TASK_RUNNING;
remove_wait_queue(sk->sk_sleep, &wait);
return -ERESTARTSYS; return -ERESTARTSYS;
} }
current->state = TASK_RUNNING; current->state = TASK_RUNNING;
...@@ -1377,7 +1384,7 @@ static int __init nr_proto_init(void) ...@@ -1377,7 +1384,7 @@ static int __init nr_proto_init(void)
{ {
int i; int i;
-	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device)) {
+	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n"); printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
return -1; return -1;
} }
...@@ -1405,6 +1412,7 @@ static int __init nr_proto_init(void) ...@@ -1405,6 +1412,7 @@ static int __init nr_proto_init(void)
dev->base_addr = i; dev->base_addr = i;
if (register_netdev(dev)) { if (register_netdev(dev)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n"); printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
free_netdev(dev);
goto fail; goto fail;
} }
dev_nr[i] = dev; dev_nr[i] = dev;
...@@ -1433,8 +1441,10 @@ static int __init nr_proto_init(void) ...@@ -1433,8 +1441,10 @@ static int __init nr_proto_init(void)
return 0; return 0;
fail: fail:
-	while (--i >= 0)
+	while (--i >= 0) {
		unregister_netdev(dev_nr[i]);
free_netdev(dev_nr[i]);
}
kfree(dev_nr); kfree(dev_nr);
return -1; return -1;
} }
...@@ -1474,8 +1484,10 @@ static void __exit nr_exit(void) ...@@ -1474,8 +1484,10 @@ static void __exit nr_exit(void)
for (i = 0; i < nr_ndevs; i++) { for (i = 0; i < nr_ndevs; i++) {
struct net_device *dev = dev_nr[i]; struct net_device *dev = dev_nr[i];
-		if (dev)
+		if (dev) {
			unregister_netdev(dev);
free_netdev(dev);
}
} }
kfree(dev_nr); kfree(dev_nr);
......
...@@ -204,7 +204,6 @@ void nr_setup(struct net_device *dev) ...@@ -204,7 +204,6 @@ void nr_setup(struct net_device *dev)
dev->hard_start_xmit = nr_xmit; dev->hard_start_xmit = nr_xmit;
dev->open = nr_open; dev->open = nr_open;
dev->stop = nr_close; dev->stop = nr_close;
dev->destructor = free_netdev;
dev->hard_header = nr_header; dev->hard_header = nr_header;
dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN; dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;
......
...@@ -246,6 +246,10 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct ...@@ -246,6 +246,10 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
goto oom; goto oom;
/* drop any routing info */
dst_release(skb->dst);
skb->dst = NULL;
spkt = (struct sockaddr_pkt*)skb->cb; spkt = (struct sockaddr_pkt*)skb->cb;
skb_push(skb, skb->data-skb->mac.raw); skb_push(skb, skb->data-skb->mac.raw);
...@@ -486,6 +490,9 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packe ...@@ -486,6 +490,9 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
skb_set_owner_r(skb, sk); skb_set_owner_r(skb, sk);
skb->dev = NULL; skb->dev = NULL;
dst_release(skb->dst);
skb->dst = NULL;
spin_lock(&sk->sk_receive_queue.lock); spin_lock(&sk->sk_receive_queue.lock);
po->stats.tp_packets++; po->stats.tp_packets++;
__skb_queue_tail(&sk->sk_receive_queue, skb); __skb_queue_tail(&sk->sk_receive_queue, skb);
......
...@@ -359,7 +359,7 @@ void rose_destroy_socket(struct sock *sk) ...@@ -359,7 +359,7 @@ void rose_destroy_socket(struct sock *sk)
sk->sk_timer.data = (unsigned long)sk; sk->sk_timer.data = (unsigned long)sk;
add_timer(&sk->sk_timer); add_timer(&sk->sk_timer);
} else } else
-		sk_free(sk);
+		sock_put(sk);
} }
/* /*
...@@ -634,7 +634,6 @@ static int rose_release(struct socket *sock) ...@@ -634,7 +634,6 @@ static int rose_release(struct socket *sock)
} }
sock->sk = NULL; sock->sk = NULL;
sk->sk_socket = NULL; /* Not used, but we should do this. **/
return 0; return 0;
} }
...@@ -813,6 +812,8 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le ...@@ -813,6 +812,8 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
schedule(); schedule();
continue; continue;
} }
current->state = TASK_RUNNING;
remove_wait_queue(sk->sk_sleep, &wait);
return -ERESTARTSYS; return -ERESTARTSYS;
} }
current->state = TASK_RUNNING; current->state = TASK_RUNNING;
...@@ -864,8 +865,11 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags) ...@@ -864,8 +865,11 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
release_sock(sk); release_sock(sk);
-		if (flags & O_NONBLOCK)
+		if (flags & O_NONBLOCK) {
current->state = TASK_RUNNING;
remove_wait_queue(sk->sk_sleep, &wait);
return -EWOULDBLOCK; return -EWOULDBLOCK;
}
if (!signal_pending(tsk)) { if (!signal_pending(tsk)) {
schedule(); schedule();
lock_sock(sk); lock_sock(sk);
...@@ -1482,7 +1486,7 @@ static int __init rose_proto_init(void) ...@@ -1482,7 +1486,7 @@ static int __init rose_proto_init(void)
rose_callsign = null_ax25_address; rose_callsign = null_ax25_address;
-	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device)) {
+	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
return -1; return -1;
} }
...@@ -1503,23 +1507,14 @@ static int __init rose_proto_init(void) ...@@ -1503,23 +1507,14 @@ static int __init rose_proto_init(void)
name, rose_setup); name, rose_setup);
if (!dev) { if (!dev) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
-			while (--i >= 0)
-				kfree(dev_rose[i]);
-			return -ENOMEM;
+			goto fail;
		}
-		dev_rose[i] = dev;
-	}
-
-	for (i = 0; i < rose_ndevs; i++) {
-		if (register_netdev(dev_rose[i])) {
+		if (register_netdev(dev)) {
			printk(KERN_ERR "ROSE: netdevice regeistration failed\n");
-			while (--i >= 0) {
-				unregister_netdev(dev_rose[i]);
-				kfree(dev_rose[i]);
-				return -EIO;
-			}
+			free_netdev(dev);
+			goto fail;
		}
+		dev_rose[i] = dev;
} }
sock_register(&rose_family_ops); sock_register(&rose_family_ops);
...@@ -1542,6 +1537,13 @@ static int __init rose_proto_init(void) ...@@ -1542,6 +1537,13 @@ static int __init rose_proto_init(void)
proc_net_fops_create("rose_routes", S_IRUGO, &rose_routes_fops); proc_net_fops_create("rose_routes", S_IRUGO, &rose_routes_fops);
return 0; return 0;
fail:
while (--i >= 0) {
unregister_netdev(dev_rose[i]);
free_netdev(dev_rose[i]);
}
kfree(dev_rose);
return -ENOMEM;
} }
module_init(rose_proto_init); module_init(rose_proto_init);
......
...@@ -247,10 +247,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) ...@@ -247,10 +247,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
*back = tp->next; *back = tp->next;
spin_unlock_bh(&dev->queue_lock); spin_unlock_bh(&dev->queue_lock);
write_unlock(&qdisc_tree_lock); write_unlock(&qdisc_tree_lock);
-			tp->ops->destroy(tp);
-			module_put(tp->ops->owner);
-			kfree(tp);
+			tcf_destroy(tp);
err = 0; err = 0;
goto errout; goto errout;
} }
......
...@@ -162,7 +162,7 @@ static void destroy_filters(struct atm_flow_data *flow) ...@@ -162,7 +162,7 @@ static void destroy_filters(struct atm_flow_data *flow)
while ((filter = flow->filter_list)) { while ((filter = flow->filter_list)) {
DPRINTK("destroy_filters: destroying filter %p\n",filter); DPRINTK("destroy_filters: destroying filter %p\n",filter);
flow->filter_list = filter->next; flow->filter_list = filter->next;
-		filter->ops->destroy(filter);
+		tcf_destroy(filter);
} }
} }
......
...@@ -1705,7 +1705,7 @@ static void cbq_destroy_filters(struct cbq_class *cl) ...@@ -1705,7 +1705,7 @@ static void cbq_destroy_filters(struct cbq_class *cl)
while ((tp = cl->filter_list) != NULL) { while ((tp = cl->filter_list) != NULL) {
cl->filter_list = tp->next; cl->filter_list = tp->next;
-		tp->ops->destroy(tp);
+		tcf_destroy(tp);
} }
} }
......
...@@ -752,7 +752,7 @@ csz_destroy(struct Qdisc* sch) ...@@ -752,7 +752,7 @@ csz_destroy(struct Qdisc* sch)
while ((tp = q->filter_list) != NULL) { while ((tp = q->filter_list) != NULL) {
q->filter_list = tp->next; q->filter_list = tp->next;
-		tp->ops->destroy(tp);
+		tcf_destroy(tp);
} }
} }
......
...@@ -378,7 +378,7 @@ static void dsmark_destroy(struct Qdisc *sch) ...@@ -378,7 +378,7 @@ static void dsmark_destroy(struct Qdisc *sch)
while (p->filter_list) { while (p->filter_list) {
tp = p->filter_list; tp = p->filter_list;
p->filter_list = tp->next; p->filter_list = tp->next;
-		tp->ops->destroy(tp);
+		tcf_destroy(tp);
} }
qdisc_destroy(p->q); qdisc_destroy(p->q);
p->q = &noop_qdisc; p->q = &noop_qdisc;
......
...@@ -1338,7 +1338,7 @@ static void htb_destroy_filters(struct tcf_proto **fl) ...@@ -1338,7 +1338,7 @@ static void htb_destroy_filters(struct tcf_proto **fl)
while ((tp = *fl) != NULL) { while ((tp = *fl) != NULL) {
*fl = tp->next; *fl = tp->next;
-		tp->ops->destroy(tp);
+		tcf_destroy(tp);
} }
} }
......
...@@ -292,7 +292,7 @@ static void ingress_destroy(struct Qdisc *sch) ...@@ -292,7 +292,7 @@ static void ingress_destroy(struct Qdisc *sch)
while (p->filter_list) { while (p->filter_list) {
tp = p->filter_list; tp = p->filter_list;
p->filter_list = tp->next; p->filter_list = tp->next;
-		tp->ops->destroy(tp);
+		tcf_destroy(tp);
} }
memset(p, 0, sizeof(*p)); memset(p, 0, sizeof(*p));
p->filter_list = NULL; p->filter_list = NULL;
......
...@@ -162,7 +162,7 @@ prio_destroy(struct Qdisc* sch) ...@@ -162,7 +162,7 @@ prio_destroy(struct Qdisc* sch)
while ((tp = q->filter_list) != NULL) { while ((tp = q->filter_list) != NULL) {
q->filter_list = tp->next; q->filter_list = tp->next;
-		tp->ops->destroy(tp);
+		tcf_destroy(tp);
} }
for (prio=0; prio<q->bands; prio++) { for (prio=0; prio<q->bands; prio++) {
......
...@@ -108,6 +108,10 @@ ...@@ -108,6 +108,10 @@
Note that the peak rate TBF is much more tough: with MTU 1500 Note that the peak rate TBF is much more tough: with MTU 1500
P_crit = 150Kbytes/sec. So, if you need greater peak P_crit = 150Kbytes/sec. So, if you need greater peak
rates, use alpha with HZ=1000 :-) rates, use alpha with HZ=1000 :-)
With classful TBF, limit is just kept for backwards compatibility.
It is passed to the default bfifo qdisc - if the inner qdisc is
changed the limit is not effective anymore.
*/ */
struct tbf_sched_data struct tbf_sched_data
...@@ -136,7 +140,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -136,7 +140,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data; struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
int ret; int ret;
-	if (skb->len > q->max_size || sch->stats.backlog + skb->len > q->limit) {
+	if (skb->len > q->max_size) {
sch->stats.drops++; sch->stats.drops++;
#ifdef CONFIG_NET_CLS_POLICE #ifdef CONFIG_NET_CLS_POLICE
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
...@@ -152,7 +156,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -152,7 +156,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
} }
sch->q.qlen++; sch->q.qlen++;
sch->stats.backlog += skb->len;
sch->stats.bytes += skb->len; sch->stats.bytes += skb->len;
sch->stats.packets++; sch->stats.packets++;
return 0; return 0;
...@@ -163,10 +166,8 @@ static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch) ...@@ -163,10 +166,8 @@ static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data; struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
int ret; int ret;
-	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
+	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
		sch->q.qlen++;
-		sch->stats.backlog += skb->len;
-	}
return ret; return ret;
} }
...@@ -178,7 +179,6 @@ static unsigned int tbf_drop(struct Qdisc* sch) ...@@ -178,7 +179,6 @@ static unsigned int tbf_drop(struct Qdisc* sch)
if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) { if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
sch->q.qlen--; sch->q.qlen--;
sch->stats.backlog -= len;
sch->stats.drops++; sch->stats.drops++;
} }
return len; return len;
...@@ -224,7 +224,6 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) ...@@ -224,7 +224,6 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
q->t_c = now; q->t_c = now;
q->tokens = toks; q->tokens = toks;
q->ptokens = ptoks; q->ptokens = ptoks;
sch->stats.backlog -= len;
sch->q.qlen--; sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED; sch->flags &= ~TCQ_F_THROTTLED;
return skb; return skb;
...@@ -253,7 +252,6 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) ...@@ -253,7 +252,6 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) { if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
/* When requeue fails skb is dropped */ /* When requeue fails skb is dropped */
sch->q.qlen--; sch->q.qlen--;
sch->stats.backlog -= len;
sch->stats.drops++; sch->stats.drops++;
} }
...@@ -269,7 +267,6 @@ static void tbf_reset(struct Qdisc* sch) ...@@ -269,7 +267,6 @@ static void tbf_reset(struct Qdisc* sch)
qdisc_reset(q->qdisc); qdisc_reset(q->qdisc);
sch->q.qlen = 0; sch->q.qlen = 0;
sch->stats.backlog = 0;
PSCHED_GET_TIME(q->t_c); PSCHED_GET_TIME(q->t_c);
q->tokens = q->buffer; q->tokens = q->buffer;
q->ptokens = q->mtu; q->ptokens = q->mtu;
...@@ -456,7 +453,6 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, ...@@ -456,7 +453,6 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
*old = xchg(&q->qdisc, new); *old = xchg(&q->qdisc, new);
qdisc_reset(*old); qdisc_reset(*old);
sch->q.qlen = 0; sch->q.qlen = 0;
sch->stats.backlog = 0;
sch_tree_unlock(sch); sch_tree_unlock(sch);
return 0; return 0;
......