Commit 08f34001 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.5

into home.osdl.org:/home/torvalds/v2.5/linux
parents 6e68be4e b10dd5ac
......@@ -185,6 +185,7 @@ Original developers of the crypto algorithms:
Matthew Skala (Twofish)
Dag Arne Osvik (Serpent)
Brian Gladman (AES)
Kartikey Mahendra Bhatt (CAST6)
SHA1 algorithm contributors:
Jean-Francois Dive
......@@ -213,7 +214,7 @@ AES algorithm contributors:
Kyle McMartin
Adam J. Richter
CAST5/CAST6 algorithm contributors:
CAST5 algorithm contributors:
Kartikey Mahendra Bhatt (original developers unknown, FSF copyright).
Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>
......
......@@ -442,7 +442,6 @@ static struct crypto_alg aes_alg = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_ivsize = AES_BLOCK_SIZE,
.cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
......
......@@ -456,7 +456,6 @@ static struct crypto_alg alg = {
.cra_u = { .cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
.cia_ivsize = BF_BLOCK_SIZE,
.cia_setkey = bf_setkey,
.cia_encrypt = bf_encrypt,
.cia_decrypt = bf_decrypt } }
......
......@@ -826,7 +826,6 @@ static struct crypto_alg alg = {
.cipher = {
.cia_min_keysize = CAST5_MIN_KEY_SIZE,
.cia_max_keysize = CAST5_MAX_KEY_SIZE,
.cia_ivsize = CAST5_BLOCK_SIZE,
.cia_setkey = cast5_setkey,
.cia_encrypt = cast5_encrypt,
.cia_decrypt = cast5_decrypt
......
......@@ -539,7 +539,6 @@ static struct crypto_alg alg = {
.cipher = {
.cia_min_keysize = CAST6_MIN_KEY_SIZE,
.cia_max_keysize = CAST6_MAX_KEY_SIZE,
.cia_ivsize = CAST6_BLOCK_SIZE,
.cia_setkey = cast6_setkey,
.cia_encrypt = cast6_encrypt,
.cia_decrypt = cast6_decrypt}
......
......@@ -345,7 +345,6 @@ int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
int ret = 0;
struct crypto_alg *alg = tfm->__crt_alg;
struct cipher_tfm *ops = &tfm->crt_cipher;
ops->cit_setkey = setkey;
......@@ -381,8 +380,7 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
BUG();
}
if (alg->cra_cipher.cia_ivsize &&
ops->cit_mode != CRYPTO_TFM_MODE_ECB) {
if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
switch (crypto_tfm_alg_blocksize(tfm)) {
case 8:
......@@ -401,7 +399,8 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
goto out;
}
ops->cit_iv = kmalloc(alg->cra_cipher.cia_ivsize, GFP_KERNEL);
ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
if (ops->cit_iv == NULL)
ret = -ENOMEM;
}
......
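The cia_ivsize removals in this and the surrounding cipher files work because every cipher in the tree used an IV exactly one block long, so the transform init path can derive the size instead of carrying it per algorithm. A minimal sketch of the resulting logic in crypto_init_cipher_ops(), condensed from the hunk above with error handling elided:

    /* IV state is now per-transform: only CBC needs an IV, and its
     * size is simply the cipher block size. ECB transforms skip the
     * allocation entirely, leaving cit_ivsize at 0. */
    if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
            ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
            ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
            if (ops->cit_iv == NULL)
                    ret = -ENOMEM;
    }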
......@@ -89,7 +89,6 @@ static struct crypto_alg cipher_null = {
.cra_u = { .cipher = {
.cia_min_keysize = NULL_KEY_SIZE,
.cia_max_keysize = NULL_KEY_SIZE,
.cia_ivsize = 0,
.cia_setkey = null_setkey,
.cia_encrypt = null_encrypt,
.cia_decrypt = null_decrypt } }
......
......@@ -1249,7 +1249,6 @@ static struct crypto_alg des_alg = {
.cra_u = { .cipher = {
.cia_min_keysize = DES_KEY_SIZE,
.cia_max_keysize = DES_KEY_SIZE,
.cia_ivsize = DES_BLOCK_SIZE,
.cia_setkey = des_setkey,
.cia_encrypt = des_encrypt,
.cia_decrypt = des_decrypt } }
......@@ -1265,7 +1264,6 @@ static struct crypto_alg des3_ede_alg = {
.cra_u = { .cipher = {
.cia_min_keysize = DES3_EDE_KEY_SIZE,
.cia_max_keysize = DES3_EDE_KEY_SIZE,
.cia_ivsize = DES3_EDE_BLOCK_SIZE,
.cia_setkey = des3_ede_setkey,
.cia_encrypt = des3_ede_encrypt,
.cia_decrypt = des3_ede_decrypt } }
......
......@@ -62,8 +62,6 @@ static int c_show(struct seq_file *m, void *p)
alg->cra_cipher.cia_min_keysize);
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
seq_printf(m, "ivsize : %u\n",
alg->cra_cipher.cia_ivsize);
break;
case CRYPTO_ALG_TYPE_DIGEST:
......
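With the field gone, a /proc/crypto cipher entry no longer reports an ivsize line. Roughly (a hedged example; the exact field set varies by kernel version):

    name         : aes
    module       : kernel
    type         : cipher
    blocksize    : 16
    min keysize  : 16
    max keysize  : 32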
......@@ -483,7 +483,6 @@ static struct crypto_alg serpent_alg = {
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_ivsize = SERPENT_BLOCK_SIZE,
.cia_setkey = setkey,
.cia_encrypt = encrypt,
.cia_decrypt = decrypt } }
......
......@@ -877,7 +877,6 @@ static struct crypto_alg alg = {
.cra_u = { .cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,
.cia_ivsize = TF_BLOCK_SIZE,
.cia_setkey = twofish_setkey,
.cia_encrypt = twofish_encrypt,
.cia_decrypt = twofish_decrypt } }
......
......@@ -2019,8 +2019,7 @@ static int eni_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flgs)
* segmentation buffer descriptors of this VCC.
*/
tasklet_disable(&eni_dev->task);
for (skb = eni_dev->tx_queue.next; skb !=
(struct sk_buff *) &eni_dev->tx_queue; skb = skb->next) {
skb_queue_walk(&eni_dev->tx_queue, skb) {
unsigned long dsc;
if (ATM_SKB(skb)->vcc != vcc) continue;
......
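The open-coded traversal of the transmit queue is replaced with the skb_queue_walk() helper, which expands to essentially the same loop. A sketch of the era's <linux/skbuff.h> definition:

    /* Iterate a queue without removing entries; the queue head is
     * itself cast to sk_buff * as the sentinel, exactly what the
     * removed open-coded loop did. */
    #define skb_queue_walk(queue, skb) \
            for (skb = (queue)->next;  \
                 skb != (struct sk_buff *)(queue); \
                 skb = skb->next)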
......@@ -128,17 +128,13 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
* instead are lobbed from tx queue to rx queue
*/
if(atomic_read(&skb->users) != 1)
{
if (skb_shared(skb)) {
struct sk_buff *skb2=skb;
skb=skb_clone(skb, GFP_ATOMIC); /* Clone the buffer */
if(skb==NULL) {
kfree_skb(skb2);
if (unlikely(skb==NULL))
return 0;
}
kfree_skb(skb2);
}
else
} else
skb_orphan(skb);
skb->protocol=eth_type_trans(skb,dev);
......@@ -148,12 +144,8 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
if (skb_shinfo(skb)->tso_size) {
struct iphdr *iph = skb->nh.iph;
if (skb->protocol != htons(ETH_P_IP))
BUG();
if (iph->protocol != IPPROTO_TCP)
BUG();
BUG_ON(skb->protocol != htons(ETH_P_IP));
BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
emulate_large_send_offload(skb);
return 0;
......
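skb_shared() makes the refcount test self-documenting; it is a one-line wrapper over the atomic_read() the old loopback code open-coded:

    /* From <linux/skbuff.h>: true when more than one user holds a
     * reference to this buffer, i.e. it must be cloned before any
     * modification. */
    static inline int skb_shared(const struct sk_buff *skb)
    {
            return atomic_read(&skb->users) != 1;
    }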
......@@ -65,7 +65,6 @@ struct scatterlist;
struct cipher_alg {
unsigned int cia_min_keysize;
unsigned int cia_max_keysize;
unsigned int cia_ivsize;
int (*cia_setkey)(void *ctx, const u8 *key,
unsigned int keylen, u32 *flags);
void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src);
......@@ -128,6 +127,7 @@ struct crypto_tfm;
struct cipher_tfm {
void *cit_iv;
unsigned int cit_ivsize;
u32 cit_mode;
int (*cit_setkey)(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen);
......@@ -237,7 +237,7 @@ static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->__crt_alg->cra_cipher.cia_ivsize;
return tfm->crt_cipher.cit_ivsize;
}
static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
......
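For callers, crypto_tfm_alg_ivsize() keeps its signature but now reads the per-transform cit_ivsize, which is zero for ECB transforms. A hedged usage sketch:

    /* IV buffers are sized from the transform, not the algorithm;
     * for a CBC transform this equals the block size, for ECB it
     * is 0 and no IV handling is needed. */
    unsigned int ivlen = crypto_tfm_alg_ivsize(tfm);
    if (ivlen)
            crypto_cipher_set_iv(tfm, iv, ivlen);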
......@@ -353,7 +353,9 @@ extern int ip6_push_pending_frames(struct sock *sk);
extern void ip6_flush_pending_frames(struct sock *sk);
extern struct dst_entry * ip6_dst_lookup(struct sock *sk, struct flowi *fl);
extern int ip6_dst_lookup(struct sock *sk,
struct dst_entry **dst,
struct flowi *fl);
/*
* skb processing functions
......
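This is the API change most of the networking hunks below follow from: ip6_dst_lookup() now returns an error code and hands the route back through a struct dst_entry **, instead of returning a dst whose ->error field every caller had to inspect and remember to release on failure. It also resolves the flow's source address internally, which lets several callers drop their own ipv6_get_saddr() calls. Callers now take this shape (sketch):

    struct dst_entry *dst;
    int err;

    err = ip6_dst_lookup(sk, &dst, &fl);
    if (err)
            goto out;        /* on failure *dst is already released and NULL */
    ...
    dst_release(dst);        /* only the success path holds a reference */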
......@@ -308,6 +308,9 @@ static void ah_destroy(struct xfrm_state *x)
{
struct ah_data *ahp = x->data;
if (!ahp)
return;
if (ahp->work_icv) {
kfree(ahp->work_icv);
ahp->work_icv = NULL;
......
......@@ -437,6 +437,9 @@ void esp_destroy(struct xfrm_state *x)
{
struct esp_data *esp = x->data;
if (!esp)
return;
if (esp->conf.tfm) {
crypto_free_tfm(esp->conf.tfm);
esp->conf.tfm = NULL;
......@@ -505,6 +508,9 @@ int esp_init_state(struct xfrm_state *x, void *args)
}
esp->conf.key = x->ealg->alg_key;
esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
if (x->props.ealgo == SADB_EALG_NULL)
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
else
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
if (esp->conf.tfm == NULL)
goto error;
......
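The mode choice added to esp_init_state() exists because the null cipher carries no IV: allocating it in ECB mode keeps the new init path from setting up CBC IV state for it, while every real ESP cipher remains CBC. Condensed (sketch):

    /* SADB_EALG_NULL ("cipher_null") has no IV, so skip CBC. */
    u32 mode = (x->props.ealgo == SADB_EALG_NULL) ?
                    CRYPTO_TFM_MODE_ECB : CRYPTO_TFM_MODE_CBC;
    esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, mode);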
......@@ -336,6 +336,8 @@ static void ipcomp_free_data(struct ipcomp_data *ipcd)
static void ipcomp_destroy(struct xfrm_state *x)
{
struct ipcomp_data *ipcd = x->data;
if (!ipcd)
return;
ipcomp_free_data(ipcd);
kfree(ipcd);
}
......@@ -354,7 +356,6 @@ static int ipcomp_init_state(struct xfrm_state *x, void *args)
x->props.header_len = sizeof(struct ip_comp_hdr);
if (x->props.mode)
x->props.header_len += sizeof(struct iphdr);
x->data = ipcd;
ipcd->scratch = kmalloc(IPCOMP_SCRATCH_SIZE, GFP_KERNEL);
if (!ipcd->scratch)
......@@ -373,6 +374,7 @@ static int ipcomp_init_state(struct xfrm_state *x, void *args)
calg_desc = xfrm_calg_get_byname(x->calg->alg_name);
BUG_ON(!calg_desc);
ipcd->threshold = calg_desc->uinfo.comp.threshold;
x->data = ipcd;
err = 0;
out:
return err;
......
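The ah/esp/ipcomp destroy handlers in this merge gain NULL checks, and the init paths now publish x->data only after every allocation has succeeded. Together these make a failed init safe to unwind through the destroy path (pattern sketch; the names are hypothetical):

    static void example_destroy(struct xfrm_state *x)
    {
            struct example_data *p = x->data;

            if (!p)                 /* init failed before publishing */
                    return;
            kfree(p->scratch);
            kfree(p);
    }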
......@@ -463,7 +463,9 @@ out: return ret;
*/
static inline u32 rt_score(struct rtable *rt)
{
u32 score = rt->u.dst.__use;
u32 score = jiffies - rt->u.dst.lastuse;
score = ~score & ~(3<<30);
if (rt_valuable(rt))
score |= (1<<31);
......@@ -805,8 +807,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
* The second limit is less certain. At the moment it allows
* only 2 entries per bucket. We will see.
*/
if (chain_length > ip_rt_gc_elasticity ||
(chain_length > 1 && !(min_score & (1<<31)))) {
if (chain_length > ip_rt_gc_elasticity) {
*candp = cand->u.rt_next;
rt_free(cand);
}
......
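rt_score() now ranks cache entries by recency of use rather than raw use count: the age in jiffies is inverted so fresher routes score higher, with the top two bits reserved for flags. Annotated (sketch):

    u32 age   = jiffies - rt->u.dst.lastuse; /* small = recently used */
    u32 score = ~age & ~(3u << 30);          /* invert; keep bits 30-31 clear */
    if (rt_valuable(rt))
            score |= 1u << 31;               /* valuable routes outrank the rest */

With recency folded into the score, the extra two-entries-per-bucket eviction rule in rt_intern_hash() becomes unnecessary, which is why the second half of the condition is dropped above.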
......@@ -1884,6 +1884,7 @@ static int tcp_v4_reselect_saddr(struct sock *sk)
__sk_dst_set(sk, &rt->u.dst);
tcp_v4_setup_caps(sk, &rt->u.dst);
tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
new_saddr = rt->rt_src;
......@@ -1943,6 +1944,7 @@ int tcp_v4_rebuild_header(struct sock *sk)
if (!err) {
__sk_dst_set(sk, &rt->u.dst);
tcp_v4_setup_caps(sk, &rt->u.dst);
tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
return 0;
}
......
......@@ -444,6 +444,9 @@ static void ah6_destroy(struct xfrm_state *x)
{
struct ah_data *ahp = x->data;
if (!ahp)
return;
if (ahp->work_icv) {
kfree(ahp->work_icv);
ahp->work_icv = NULL;
......
......@@ -342,6 +342,9 @@ void esp6_destroy(struct xfrm_state *x)
{
struct esp_data *esp = x->data;
if (!esp)
return;
if (esp->conf.tfm) {
crypto_free_tfm(esp->conf.tfm);
esp->conf.tfm = NULL;
......@@ -409,6 +412,9 @@ int esp6_init_state(struct xfrm_state *x, void *args)
}
esp->conf.key = x->ealg->alg_key;
esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
if (x->props.ealgo == SADB_EALG_NULL)
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
else
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
if (esp->conf.tfm == NULL)
goto error;
......
......@@ -355,8 +355,9 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
fl.oif = np->mcast_oif;
dst = ip6_dst_lookup(sk, &fl);
if (dst->error) goto out;
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
goto out;
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl.fl6_dst))
......@@ -375,7 +376,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
if (net_ratelimit())
printk(KERN_DEBUG "icmp: len problem\n");
__skb_push(skb, plen);
goto out;
goto out_dst_release;
}
idev = in6_dev_get(skb->dev);
......@@ -396,6 +397,8 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
out_put:
if (likely(idev != NULL))
in6_dev_put(idev);
out_dst_release:
dst_release(dst);
out:
icmpv6_xmit_unlock();
}
......@@ -434,9 +437,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
fl.oif = np->mcast_oif;
dst = ip6_dst_lookup(sk, &fl);
if (dst->error) goto out;
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
goto out;
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl.fl6_dst))
......@@ -465,6 +468,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
out_put:
if (likely(idev != NULL))
in6_dev_put(idev);
dst_release(dst);
out:
icmpv6_xmit_unlock();
}
......
......@@ -1136,17 +1136,16 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
return err;
}
struct dst_entry *ip6_dst_lookup(struct sock *sk, struct flowi *fl)
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
struct dst_entry *dst = NULL;
int err = 0;
if (sk) {
struct ipv6_pinfo *np = inet6_sk(sk);
dst = __sk_dst_check(sk, np->dst_cookie);
if (dst) {
struct rt6_info *rt = (struct rt6_info*)dst;
*dst = __sk_dst_check(sk, np->dst_cookie);
if (*dst) {
struct rt6_info *rt = (struct rt6_info*)*dst;
/* Yes, checking route validity in not connected
case is not very simple. Take into account,
......@@ -1170,39 +1169,41 @@ struct dst_entry *ip6_dst_lookup(struct sock *sk, struct flowi *fl)
ipv6_addr_cmp(&fl->fl6_dst, &rt->rt6i_dst.addr))
&& (np->daddr_cache == NULL ||
ipv6_addr_cmp(&fl->fl6_dst, np->daddr_cache)))
|| (fl->oif && fl->oif != dst->dev->ifindex)) {
dst = NULL;
|| (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
*dst = NULL;
} else
dst_hold(dst);
dst_hold(*dst);
}
}
if (dst == NULL)
dst = ip6_route_output(sk, fl);
if (*dst == NULL)
*dst = ip6_route_output(sk, fl);
if (dst->error)
return dst;
if ((err = (*dst)->error))
goto out_err_release;
if (ipv6_addr_any(&fl->fl6_src)) {
err = ipv6_get_saddr(dst, &fl->fl6_dst, &fl->fl6_src);
err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
if (err) {
#if IP6_DEBUG >= 2
printk(KERN_DEBUG "ip6_build_xmit: "
printk(KERN_DEBUG "ip6_dst_lookup: "
"no available source address\n");
#endif
dst->error = err;
return dst;
}
goto out_err_release;
}
if (dst) {
if ((err = xfrm_lookup(&dst, fl, sk, 0)) < 0) {
dst->error = -ENETUNREACH;
}
if ((err = xfrm_lookup(dst, fl, sk, 0)) < 0) {
err = -ENETUNREACH;
goto out_err_release;
}
return dst;
return 0;
out_err_release:
dst_release(*dst);
*dst = NULL;
return err;
}
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
......@@ -1483,6 +1484,7 @@ int ip6_push_pending_frames(struct sock *sk)
np->cork.opt = NULL;
}
if (np->cork.rt) {
dst_release(&np->cork.rt->u.dst);
np->cork.rt = NULL;
}
if (np->cork.fl) {
......@@ -1509,6 +1511,7 @@ void ip6_flush_pending_frames(struct sock *sk)
np->cork.opt = NULL;
}
if (np->cork.rt) {
dst_release(&np->cork.rt->u.dst);
np->cork.rt = NULL;
}
if (np->cork.fl) {
......
......@@ -423,7 +423,7 @@ void ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (teli && teli == info - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit <= 1) {
if (tel->encap_limit == 0) {
if (net_ratelimit())
printk(KERN_WARNING
"%s: Too small encapsulation "
......@@ -669,7 +669,7 @@ int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipv6hdr *ipv6h = skb->nh.ipv6h;
struct ipv6_txoptions *orig_opt = NULL;
struct ipv6_txoptions *opt = NULL;
__u8 encap_limit = 0;
int encap_limit = -1;
__u16 offset;
struct flowi fl;
struct ip6_flowlabel *fl_lbl = NULL;
......@@ -692,7 +692,7 @@ int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset];
if (tel->encap_limit <= 1) {
if (tel->encap_limit == 0) {
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_HDR_FIELD, offset + 2, skb->dev);
goto tx_err;
......@@ -715,7 +715,7 @@ int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
if (fl_lbl)
orig_opt = fl_lbl->opt;
}
if (encap_limit > 0) {
if (encap_limit >= 0) {
if (!(opt = merge_options(sk, encap_limit, orig_opt))) {
goto tx_err_free_fl_lbl;
}
......
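The tunnel hunks fix the encapsulation-limit semantics: a received limit of 0 means "no further encapsulation allowed", so only that value is an error, and encap_limit becomes an int with -1 encoding "no limit option present" rather than overloading 0. Sketch of the corrected transmit-side handling:

    int encap_limit = -1;                        /* -1: no limit option seen */
    ...
    if (tel->encap_limit == 0) {
            /* forbidden to nest further: report ICMPV6_HDR_FIELD */
    } else {
            encap_limit = tel->encap_limit - 1;  /* consume one level */
    }
    ...
    if (encap_limit >= 0)                        /* 0 is now valid to emit */
            opt = merge_options(sk, encap_limit, orig_opt);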
......@@ -268,6 +268,8 @@ static void ipcomp6_free_data(struct ipcomp_data *ipcd)
static void ipcomp6_destroy(struct xfrm_state *x)
{
struct ipcomp_data *ipcd = x->data;
if (!ipcd)
return;
ipcomp6_free_data(ipcd);
kfree(ipcd);
}
......@@ -286,7 +288,6 @@ static int ipcomp6_init_state(struct xfrm_state *x, void *args)
x->props.header_len = sizeof(struct ipv6_comp_hdr);
if (x->props.mode)
x->props.header_len += sizeof(struct ipv6hdr);
x->data = ipcd;
ipcd->scratch = kmalloc(IPCOMP_SCRATCH_SIZE, GFP_KERNEL);
if (!ipcd->scratch)
......@@ -299,6 +300,7 @@ static int ipcomp6_init_state(struct xfrm_state *x, void *args)
calg_desc = xfrm_calg_get_byname(x->calg->alg_name);
BUG_ON(!calg_desc);
ipcd->threshold = calg_desc->uinfo.comp.threshold;
x->data = ipcd;
err = 0;
out:
return err;
......
......@@ -658,8 +658,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
fl.oif = np->mcast_oif;
dst = ip6_dst_lookup(sk, &fl);
if ((err = dst->error))
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
goto out;
if (hlimit < 0) {
......
......@@ -432,6 +432,7 @@ void ip6_route_input(struct sk_buff *skb)
/* Race condition! In the gap, when rt6_lock was
released someone could insert this route. Relookup.
*/
dst_release(&rt->u.dst);
goto relookup;
}
dst_hold(&rt->u.dst);
......@@ -486,6 +487,7 @@ struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
/* Race condition! In the gap, when rt6_lock was
released someone could insert this route. Relookup.
*/
dst_release(&rt->u.dst);
goto relookup;
}
dst_hold(&rt->u.dst);
......@@ -1094,8 +1096,8 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
*/
dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
dst_release(&nrt->u.dst);
}
dst_release(&nrt->u.dst);
} else {
nrt = ip6_rt_copy(rt);
if (nrt == NULL)
......
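Both route.c fixes balance dst reference counts: the relookup race path must drop the reference it already holds before retrying, and rt6_pmtu_discovery() must release nrt on every branch rather than only one. The pairing rule (sketch; the helpers are hypothetical stand-ins for the locked lookup and race test):

    relookup:
            rt = lookup_locked();            /* returns a held rt */
            if (lost_insertion_race(rt)) {   /* someone inserted meanwhile */
                    dst_release(&rt->u.dst); /* balance the hold before retry */
                    goto relookup;
            }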
......@@ -663,19 +663,12 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
}
dst = ip6_dst_lookup(sk, &fl);
err = ip6_dst_lookup(sk, &dst, &fl);
if ((err = dst->error) != 0) {
dst_release(dst);
if (err)
goto failure;
}
if (saddr == NULL) {
err = ipv6_get_saddr(dst, &np->daddr, &fl.fl6_src);
if (err) {
dst_release(dst);
goto failure;
}
saddr = &fl.fl6_src;
ipv6_addr_copy(&np->rcv_saddr, saddr);
}
......@@ -790,13 +783,14 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
fl.fl_ip_dport = inet->dport;
fl.fl_ip_sport = inet->sport;
dst = ip6_dst_lookup(sk, &fl);
if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
sk->sk_err_soft = -err;
goto out;
}
} else
dst_hold(dst);
if (dst->error) {
sk->sk_err_soft = -dst->error;
} else if (tp->pmtu_cookie > dst_pmtu(dst)) {
if (tp->pmtu_cookie > dst_pmtu(dst)) {
tcp_sync_mss(sk, dst_pmtu(dst));
tcp_simple_retransmit(sk);
} /* else let the usual retransmit timer handle it */
......@@ -891,8 +885,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
}
dst = ip6_dst_lookup(sk, &fl);
if (dst->error)
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
goto done;
}
......@@ -1020,9 +1014,7 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
fl.fl_ip_sport = t1->source;
/* sk = NULL, but it is safe for now. RST socket required. */
buff->dst = ip6_dst_lookup(NULL, &fl);
if (buff->dst->error == 0) {
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ip6_xmit(NULL, buff, &fl, NULL, 0);
TCP_INC_STATS_BH(TcpOutSegs);
TCP_INC_STATS_BH(TcpOutRsts);
......@@ -1083,9 +1075,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
fl.fl_ip_dport = t1->dest;
fl.fl_ip_sport = t1->source;
buff->dst = ip6_dst_lookup(NULL, &fl);
if (buff->dst->error == 0) {
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ip6_xmit(NULL, buff, &fl, NULL, 0);
TCP_INC_STATS_BH(TcpOutSegs);
return;
......@@ -1331,11 +1321,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
fl.fl_ip_dport = req->rmt_port;
fl.fl_ip_sport = inet_sk(sk)->sport;
dst = ip6_dst_lookup(sk, &fl);
}
if (dst->error)
if (ip6_dst_lookup(sk, &dst, &fl))
goto out;
}
newsk = tcp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
......@@ -1347,7 +1335,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
#endif
ip6_dst_store(newsk, dst, NULL);
sk->sk_route_caps = dst->dev->features &
newsk->sk_route_caps = dst->dev->features &
~(NETIF_F_IP_CSUM | NETIF_F_TSO);
newtcp6sk = (struct tcp6_sock *)newsk;
......@@ -1730,11 +1718,9 @@ static int tcp_v6_rebuild_header(struct sock *sk)
ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
}
dst = ip6_dst_lookup(sk, &fl);
err = ip6_dst_lookup(sk, &dst, &fl);
if (dst->error) {
err = dst->error;
dst_release(dst);
if (err) {
sk->sk_route_caps = 0;
return err;
}
......@@ -1742,6 +1728,7 @@ static int tcp_v6_rebuild_header(struct sock *sk)
ip6_dst_store(sk, dst, NULL);
sk->sk_route_caps = dst->dev->features &
~(NETIF_F_IP_CSUM | NETIF_F_TSO);
tcp_sk(sk)->ext2_header_len = dst->header_len;
}
return 0;
......@@ -1773,15 +1760,17 @@ static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok)
dst = __sk_dst_check(sk, np->dst_cookie);
if (dst == NULL) {
dst = ip6_dst_lookup(sk, &fl);
int err = ip6_dst_lookup(sk, &dst, &fl);
if (dst->error) {
sk->sk_err_soft = -dst->error;
dst_release(dst);
return -sk->sk_err_soft;
if (err) {
sk->sk_err_soft = -err;
return err;
}
ip6_dst_store(sk, dst, NULL);
sk->sk_route_caps = dst->dev->features &
~(NETIF_F_IP_CSUM | NETIF_F_TSO);
tcp_sk(sk)->ext2_header_len = dst->header_len;
}
skb->dst = dst_clone(dst);
......
......@@ -330,21 +330,11 @@ int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
}
dst = ip6_route_output(sk, &fl);
if ((err = dst->error) != 0) {
dst_release(dst);
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
goto out;
}
/* get the source address used in the appropriate device */
err = ipv6_get_saddr(dst, daddr, &fl.fl6_src);
if (err) {
dst_release(dst);
goto out;
}
/* source address lookup done in ip6_dst_lookup */
if (ipv6_addr_any(&np->saddr))
ipv6_addr_copy(&np->saddr, &fl.fl6_src);
......@@ -930,8 +920,8 @@ static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
fl.oif = np->mcast_oif;
dst = ip6_dst_lookup(sk, &fl);
if ((err = dst->error))
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
goto out;
if (hlimit < 0) {
......