Commit 2f5a8caa authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents eb0c34c8 4123def9
@@ -215,6 +215,8 @@ struct hh_cache
  */
 #define LL_RESERVED_SPACE(dev) \
         (((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
+        ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 
 /* These flag bits are private to the generic network queueing
  * layer, they may not be explicitly referenced by any other
...
@@ -567,7 +567,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 #ifdef CONFIG_BRIDGE_NETFILTER
         /* for bridged IP traffic encapsulated inside f.e. a vlan header,
          * we need to make room for the encapsulating header */
-        ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev + nf_bridge_pad(skb));
+        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
         mtu -= nf_bridge_pad(skb);
 #else
         ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
...
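Note on the change above: the old line added nf_bridge_pad(skb) to the net_device pointer itself rather than to the reserved headroom. A minimal standalone sketch of the arithmetic the new macro performs; the *_LEN macro names, the HH_DATA_MOD value of 16, and the sample pad of 4 bytes are assumptions made for this userspace example, since the real macros take a struct net_device:

#include <stdio.h>

/* Standalone sketch: the kernel macros take a struct net_device; these
 * variants take the link-layer header length directly.  HH_DATA_MOD is
 * assumed to be 16 here. */
#define HH_DATA_MOD 16
#define LL_RESERVED_SPACE_LEN(hlen) \
        (((hlen) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA_LEN(hlen, extra) \
        ((((hlen) + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

int main(void)
{
        int eth_hlen = 14;   /* Ethernet hard_header_len */
        int vlan_pad = 4;    /* assumed bridge pad for a VLAN-tagged frame */

        /* 14 bytes of header round up to 16 bytes of headroom ... */
        printf("plain ethernet: %d\n", LL_RESERVED_SPACE_LEN(eth_hlen));
        /* ... while 14 + 4 bytes round up to 32, leaving room for the VLAN header. */
        printf("bridged + vlan: %d\n", LL_RESERVED_SPACE_EXTRA_LEN(eth_hlen, vlan_pad));
        return 0;
}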
@@ -479,6 +479,7 @@ static int ipip_rcv(struct sk_buff *skb)
         read_lock(&ipip_lock);
         if ((tunnel = ipip_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
                 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+                        read_unlock(&ipip_lock);
                         kfree_skb(skb);
                         return 0;
                 }
...
@@ -283,7 +283,8 @@ void rawv6_err(struct sock *sk, struct sk_buff *skb,
 static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
-        if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
+        if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
+            skb->ip_summed != CHECKSUM_UNNECESSARY) {
                 if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
                         /* FIXME: increment a raw6 drops counter here */
                         kfree_skb(skb);
@@ -452,6 +453,10 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct r
         struct sk_buff *skb;
         int err = 0;
         u16 *csum;
+        u32 tmp_csum;
+
+        if (!opt->checksum)
+                goto send;
 
         if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
                 goto out;
@@ -463,29 +468,32 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct r
                 goto out;
         }
 
+        /* should be check HW csum miyazawa */
         if (skb_queue_len(&sk->sk_write_queue) == 1) {
                 /*
                  * Only one fragment on the socket.
                  */
-                /* should be check HW csum miyazawa */
-                *csum = csum_ipv6_magic(&fl->fl6_src,
-                                        &fl->fl6_dst,
-                                        len, fl->proto, skb->csum);
+                tmp_csum = skb->csum;
         } else {
-                u32 tmp_csum = 0;
+                tmp_csum = 0;
 
                 skb_queue_walk(&sk->sk_write_queue, skb) {
                         tmp_csum = csum_add(tmp_csum, skb->csum);
                 }
-
-                tmp_csum = csum_ipv6_magic(&fl->fl6_src,
-                                           &fl->fl6_dst,
-                                           len, fl->proto, tmp_csum);
-                *csum = tmp_csum;
         }
 
+        /* in case cksum was not initialized */
+        if (unlikely(*csum))
+                tmp_csum = csum_sub(tmp_csum, *csum);
+
+        *csum = csum_ipv6_magic(&fl->fl6_src,
+                                &fl->fl6_dst,
+                                len, fl->proto, tmp_csum);
+
         if (*csum == 0)
                 *csum = -1;
 
-        ip6_push_pending_frames(sk);
+send:
+        err = ip6_push_pending_frames(sk);
 out:
         return err;
 }
@@ -702,13 +710,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                 if (err)
                         ip6_flush_pending_frames(sk);
-                else if (!(msg->msg_flags & MSG_MORE)) {
-                        if (raw_opt->checksum) {
-                                err = rawv6_push_pending_frames(sk, &fl, raw_opt, len);
-                        } else {
-                                err = ip6_push_pending_frames(sk);
-                        }
-                }
+                else if (!(msg->msg_flags & MSG_MORE))
+                        err = rawv6_push_pending_frames(sk, &fl, raw_opt, len);
         }
 done:
         ip6_dst_store(sk, dst,
...
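Note on the raw.c hunks above: rawv6_push_pending_frames() now accumulates the queued fragments' partial checksums with csum_add(), subtracts whatever value the sender may already have stored at the checksum offset with csum_sub(), and only then applies the IPv6 pseudo-header via csum_ipv6_magic(). A rough userspace sketch of that one's-complement arithmetic, modeled on the generic (non-architecture-specific) kernel helpers; the fragment values and placeholder are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Userspace approximations of the generic kernel checksum helpers. */
static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
        csum += addend;
        return csum + (csum < addend);          /* fold the carry back in */
}

static uint32_t csum_sub(uint32_t csum, uint32_t addend)
{
        return csum_add(csum, ~addend);         /* subtract = add the complement */
}

static uint16_t csum_fold(uint32_t csum)
{
        csum = (csum & 0xffff) + (csum >> 16);  /* reduce 32 bits to 16 */
        csum = (csum & 0xffff) + (csum >> 16);
        return (uint16_t)~csum;
}

int main(void)
{
        /* Pretend three queued fragments contributed these partial sums. */
        uint32_t frag_csum[] = { 0x1a2b3c4d, 0x00112233, 0x0000ffee };
        uint32_t placeholder = 0x00001234;      /* value already at the csum offset */
        uint32_t tmp_csum = 0;

        for (int i = 0; i < 3; i++)
                tmp_csum = csum_add(tmp_csum, frag_csum[i]);

        /* in case the checksum field was not left zeroed by the sender */
        if (placeholder)
                tmp_csum = csum_sub(tmp_csum, placeholder);

        /* csum_ipv6_magic() would mix in the pseudo-header before folding */
        printf("folded checksum: 0x%04x\n", csum_fold(tmp_csum));
        return 0;
}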
@@ -489,15 +489,16 @@ int xfrm_state_update(struct xfrm_state *x)
                         memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                 x1->km.dying = 0;
-                if (!mod_timer(&x1->timer, jiffies + HZ))
-                        xfrm_state_hold(x1);
-                if (x1->curlft.use_time)
-                        xfrm_state_check_expire(x1);
                 err = 0;
         }
         spin_unlock_bh(&x1->lock);
 
+        if (!mod_timer(&x1->timer, jiffies + HZ))
+                xfrm_state_hold(x1);
+        if (x1->curlft.use_time)
+                xfrm_state_check_expire(x1);
+
         xfrm_state_put(x1);
 
         return err;
...