Commit 744dd21f authored by Alexey Kuznetsov, committed by David S. Miller

Net fixes:

- Handle downing of multicast routes properly, since they
  potentially refer to multiple devices.
- IPv6 raw was missing sk->filter handling, and rawv6_rcv
  was missing some checksum processing.
- IPv6 UDP needs to use skb_checksum, not csum_partial, on
  skb->h.raw (see the checksum sketch below, just before the diff).
- Ingress packet scheduler fixes from Jamal.
- Addrconf bug discovered by Petr Baudis: ipv6_ifa_notify should
  pass NULL as the second arg to ipv6_chk_addr in the RTM_DELADDR
  handling.
parent c39a32ae
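
Background for the raw and UDP checksum hunks below: csum_partial() only sums a linear buffer starting at skb->h.raw, while skb_checksum() also walks an skb's paged fragments and fragment list, so it is the right primitive once packets may be non-linear. In those hunks the packet is dropped when csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)) is non-zero. What follows is a minimal userspace sketch of that RFC 1071 ones'-complement fold, assuming nothing beyond standard C; the helper names (csum_add, csum_fold16) and the toy packet are illustrative, not taken from the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Accumulate 16-bit big-endian words in ones'-complement arithmetic,
 * roughly what csum_partial()/skb_checksum() do over packet bytes. */
static uint32_t csum_add(uint32_t sum, const uint8_t *buf, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)			/* trailing odd byte */
		sum += (uint32_t)buf[0] << 8;
	return sum;
}

/* Fold the 32-bit accumulator down to 16 bits and complement,
 * like csum_fold(). */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Toy buffer: two data words plus a checksum word chosen so that
	 * the whole thing sums to 0xffff and therefore folds to zero. */
	uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c, 0xba, 0xe3 };
	uint16_t folded = csum_fold16(csum_add(0, pkt, sizeof(pkt)));

	/* A non-zero fold is what the kernel hunks treat as a bad checksum. */
	printf("checksum %s\n", folded == 0 ? "ok" : "bad");
	return 0;
}

Compiled with any C compiler this prints "checksum ok"; corrupting any byte of pkt[] makes the fold non-zero, which is exactly the condition the kernel code above treats as a checksum failure.
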
@@ -604,6 +604,11 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
struct net_device *dev = ptr;
struct in_device *in_dev = __in_dev_get(dev);
if (event == NETDEV_UNREGISTER) {
fib_disable_ip(dev, 2);
return NOTIFY_DONE;
}
if (!in_dev)
return NOTIFY_DONE;
@@ -620,9 +625,6 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
case NETDEV_DOWN:
fib_disable_ip(dev, 0);
break;
case NETDEV_UNREGISTER:
fib_disable_ip(dev, 1);
break;
case NETDEV_CHANGEMTU:
case NETDEV_CHANGE:
rt_cache_flush(0);
@@ -185,6 +185,7 @@ int ip_fib_check_default(u32 gw, struct net_device *dev)
continue;
for_nexthops(fi) {
if (nh->nh_dev == dev && nh->nh_gw == gw &&
nh->nh_scope == RT_SCOPE_LINK &&
!(nh->nh_flags&RTNH_F_DEAD)) {
read_unlock(&fib_info_lock);
return 0;
@@ -379,15 +380,23 @@ static int fib_check_nh(const struct rtmsg *r, struct fib_info *fi, struct fib_n
/* It is not necessary, but requires a bit of thinking */
if (key.scope < RT_SCOPE_LINK)
key.scope = RT_SCOPE_LINK;
if ((err = fib_lookup(&key, &res)) != 0)
return err;
err = -EINVAL;
if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
goto out;
nh->nh_scope = res.scope;
nh->nh_oif = FIB_RES_OIF(res);
nh->nh_dev = FIB_RES_DEV(res);
if (nh->nh_dev)
if ((nh->nh_dev = FIB_RES_DEV(res)) == NULL)
goto out;
atomic_inc(&nh->nh_dev->refcnt);
err = -ENETDOWN;
if (!(nh->nh_dev->flags & IFF_UP))
goto out;
err = 0;
out:
fib_res_put(&res);
return err;
} else {
struct in_device *in_dev;
@@ -875,13 +884,15 @@ int fib_sync_down(u32 local, struct net_device *dev, int force)
fi->fib_power -= nh->nh_power;
nh->nh_power = 0;
spin_unlock_bh(&fib_multipath_lock);
if (force && nh->nh_dev) {
dev_put(nh->nh_dev);
nh->nh_dev = NULL;
}
#endif
dead++;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (force > 1 && nh->nh_dev == dev) {
dead = fi->fib_nhs;
break;
}
#endif
} endfor_nexthops(fi)
if (dead == fi->fib_nhs) {
fi->fib_flags |= RTNH_F_DEAD;
@@ -914,10 +925,6 @@ int fib_sync_up(struct net_device *dev)
alive++;
continue;
}
if (nh->nh_dev == NULL && nh->nh_oif == dev->ifindex) {
dev_hold(dev);
nh->nh_dev = dev;
}
if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
continue;
if (nh->nh_dev != dev || __in_dev_get(dev) == NULL)
@@ -1833,7 +1833,7 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
break;
case RTM_DELADDR:
addrconf_leave_solict(ifp->idev->dev, &ifp->addr);
if (!ipv6_chk_addr(&ifp->addr, ifp->idev->dev))
if (!ipv6_chk_addr(&ifp->addr, NULL))
ip6_rt_addr_del(&ifp->addr, ifp->idev->dev);
break;
}
@@ -281,6 +281,16 @@ void rawv6_err(struct sock *sk, struct sk_buff *skb,
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
#if defined(CONFIG_FILTER)
if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
IP6_INC_STATS_BH(Ip6InDiscards);
kfree_skb(skb);
return 0;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
#endif
/* Charge it to the socket. */
if (sock_queue_rcv_skb(sk,skb)<0) {
IP6_INC_STATS_BH(Ip6InDiscards);
@@ -302,10 +312,35 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
struct inet_opt *inet = inet_sk(sk);
struct raw6_opt *raw_opt = raw6_sk(sk);
if (!raw_opt->checksum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
if (skb->ip_summed == CHECKSUM_HW) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
&skb->nh.ipv6h->daddr,
skb->len, inet->num, skb->csum)) {
NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "raw v6 hw csum failure.\n"));
skb->ip_summed = CHECKSUM_NONE;
}
}
if (skb->ip_summed == CHECKSUM_NONE)
skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
&skb->nh.ipv6h->daddr,
skb->len, inet->num, 0);
}
if (inet->hdrincl) {
__skb_push(skb, skb->nh.raw - skb->data);
skb->h.raw = skb->nh.raw;
if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
(unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
IP6_INC_STATS_BH(Ip6InDiscards);
kfree_skb(skb);
return 0;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
rawv6_rcv_skb(sk, skb);
@@ -345,7 +380,17 @@ int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
msg->msg_flags |= MSG_TRUNC;
}
if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else if (msg->msg_flags&MSG_TRUNC) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else {
err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
if (err)
goto out_free;
@@ -372,6 +417,27 @@ int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
skb_free_datagram(sk, skb);
out:
return err;
csum_copy_err:
/* Clear queue. */
if (flags&MSG_PEEK) {
int clear = 0;
spin_lock_irq(&sk->receive_queue.lock);
if (skb == skb_peek(&sk->receive_queue)) {
__skb_unlink(skb, &sk->receive_queue);
clear = 1;
}
spin_unlock_irq(&sk->receive_queue.lock);
if (clear)
kfree_skb(skb);
}
/* Error for blocking case is chosen to masquerade
as some normal condition.
*/
err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
IP6_INC_STATS_USER(Ip6InDiscards);
goto out_free;
}
/*
@@ -508,7 +508,7 @@ static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
#if defined(CONFIG_FILTER)
if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum))) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
UDP6_INC_STATS_BH(UdpInErrors);
IP6_INC_STATS_BH(Ip6InDiscards);
kfree_skb(skb);
@@ -43,6 +43,9 @@
#define PRIV(sch) ((struct ingress_qdisc_data *) (sch)->data)
/* Thanks to Doron Oz for this hack
*/
static int nf_registered = 0;
struct ingress_qdisc_data {
struct Qdisc *q;
@@ -147,15 +150,21 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
#ifdef CONFIG_NET_CLS_POLICE
case TC_POLICE_SHOT:
result = NF_DROP;
sch->stats.drops++;
break;
case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
case TC_POLICE_OK:
case TC_POLICE_UNSPEC:
default:
sch->stats.packets++;
sch->stats.bytes += skb->len;
result = NF_ACCEPT;
break;
}
#else
sch->stats.packets++;
sch->stats.bytes += skb->len;
#endif
};
skb->tc_index = TC_H_MIN(res.classid);
return result;
@@ -236,22 +245,21 @@ int ingress_init(struct Qdisc *sch,struct rtattr *opt)
{
struct ingress_qdisc_data *p = PRIV(sch);
if (!nf_registered) {
if (nf_register_hook(&ing_ops) < 0) {
printk("ingress qdisc registration error \n");
goto error;
}
nf_registered++;
}
DPRINTK("ingress_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
memset(p, 0, sizeof(*p));
p->filter_list = NULL;
p->q = &noop_qdisc;
#ifndef MODULE
if (nf_register_hook(&ing_ops) < 0) {
printk("Unable to register ingress \n");
goto error;
}
#endif
DPRINTK("ingress_init: qdisc %p\n", sch);
MOD_INC_USE_COUNT;
return 0;
#ifndef MODULE
error:
#endif
return -EINVAL;
}
@@ -296,11 +304,8 @@ static void ingress_destroy(struct Qdisc *sch)
qdisc_destroy(p->q);
#endif
#ifndef MODULE
nf_unregister_hook(&ing_ops);
#endif
MOD_DEC_USE_COUNT;
}
@@ -356,6 +361,7 @@ struct Qdisc_ops ingress_qdisc_ops =
ingress_dump, /* dump */
};
#ifdef MODULE
int init_module(void)
{
@@ -366,20 +372,15 @@ int init_module(void)
return ret;
}
if (nf_register_hook(&ing_ops) < 0) {
printk("Unable to register ingress on hook \n");
unregister_qdisc(&ingress_qdisc_ops);
return 0;
}
return ret;
}
void cleanup_module(void)
{
nf_unregister_hook(&ing_ops);
unregister_qdisc(&ingress_qdisc_ops);
if (nf_registered)
nf_unregister_hook(&ing_ops);
}
#endif
MODULE_LICENSE("GPL");