Commit 56951b54 authored by Alexey Kuznetsov, committed by David S. Miller

[IPv4]: More output path work.

- Convert {udp,raw}_sendmsg to use ip_append_data; work done
  by Maxim Giryaev <gem@asplinux.ru>
- Get rid of ip_build_xmit() and its helper functions.
  Nobody uses them anymore.
parent 012bd31f
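Both converted senders end up following the same pattern, visible in the raw.c and udp.c hunks below: user data is appended to the socket's pending queue and pushed out as a single datagram unless the caller keeps the cork on with MSG_MORE. The fragment below is a condensed sketch of that pattern, lifted from the non-hdrincl branch of raw_sendmsg() in this commit; it is not a complete function.

	/* Condensed sketch of the ip_append_data()-based output path adopted
	 * here (see the raw_sendmsg()/udp_sendmsg() hunks below).
	 * ip_generic_getfrag copies the user data from msg->msg_iov; the
	 * queued data is turned into a datagram by ip_push_pending_frames()
	 * (udp_push_pending_frames() for UDP) once the cork is released.
	 */
	lock_sock(sk);
	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
			     0 /* transport header length; UDP passes sizeof(struct udphdr) */,
			     &ipc, rt, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);		/* drop whatever was queued */
	else if (!(msg->msg_flags & MSG_MORE))
		err = ip_push_pending_frames(sk);	/* build and send the datagram */
	release_sock(sk);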
@@ -98,17 +98,6 @@ extern int ip_do_nat(struct sk_buff *skb);
 extern void ip_send_check(struct iphdr *ip);
 extern int ip_queue_xmit(struct sk_buff *skb);
 extern void ip_init(void);
-extern int ip_build_xmit(struct sock *sk,
-			 int getfrag (const void *,
-				      char *,
-				      unsigned int,
-				      unsigned int,
-				      struct sk_buff *),
-			 const void *frag,
-			 unsigned length,
-			 struct ipcm_cookie *ipc,
-			 struct rtable *rt,
-			 int flags);
 extern int ip_append_data(struct sock *sk,
			   int getfrag(void *from, char *to, int offset, int len,
				       int odd, struct sk_buff *skb),
...
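The getfrag callback type changes with the new interface; the declaration kept above shows what ip_append_data() expects. As a purely hypothetical illustration (not code from this commit), a callback with that signature which copies from a flat kernel buffer could look like the sketch below. The callers converted here pass ip_generic_getfrag instead, which copies from msg->msg_iov.

	/* Hypothetical getfrag example (kernel context assumed): copy "len"
	 * bytes starting at "offset" of a flat buffer handed in as "from".
	 * The "odd" argument, used by checksumming implementations, is ignored.
	 */
	static int flatbuf_getfrag(void *from, char *to, int offset, int len,
				   int odd, struct sk_buff *skb)
	{
		memcpy(to, (char *)from + offset, len);
		return 0;
	}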
@@ -375,361 +375,6 @@ int ip_queue_xmit(struct sk_buff *skb)
	return -EHOSTUNREACH;
}
/* _Dead beaf_
*
* Build and send a packet, with as little as one copy
*
* Doesn't care much about ip options... option length can be
* different for fragment at 0 and other fragments.
*
* Note that the fragment at the highest offset is sent first,
* so the getfrag routine can fill in the TCP/UDP checksum header
* field in the last fragment it sends... actually it also helps
* the reassemblers, they can put most packets in at the head of
* the fragment queue, and they know the total size in advance. This
* last feature will measurably improve the Linux fragment handler one
* day.
*
* The callback has five args, an arbitrary pointer (copy of frag),
* the source IP address (may depend on the routing table), the
* destination address (char *), the offset to copy from, and the
* length to be copied.
*/
static int ip_build_xmit_slow(struct sock *sk,
int getfrag (const void *,
char *,
unsigned int,
unsigned int,
struct sk_buff *),
const void *frag,
unsigned length,
struct ipcm_cookie *ipc,
struct rtable *rt,
int flags)
{
struct inet_opt *inet = inet_sk(sk);
unsigned int fraglen, maxfraglen, fragheaderlen;
int err;
int offset, mf;
int mtu;
u16 id;
int hh_len = (rt->u.dst.dev->hard_header_len&~15) + 16;
int nfrags=0;
struct ip_options *opt = ipc->opt;
int df = 0;
int csumselect = CHECKSUM_NONE;
mtu = rt->u.dst.pmtu;
if (ip_dont_fragment(sk, &rt->u.dst))
df = htons(IP_DF);
length -= sizeof(struct iphdr);
if (opt) {
fragheaderlen = sizeof(struct iphdr) + opt->optlen;
maxfraglen = ((mtu-sizeof(struct iphdr)-opt->optlen) & ~7) + fragheaderlen;
} else {
fragheaderlen = sizeof(struct iphdr);
/*
* Fragheaderlen is the size of 'overhead' on each buffer. Now work
* out the size of the frames to send.
*/
maxfraglen = ((mtu-sizeof(struct iphdr)) & ~7) + fragheaderlen;
}
if (length + fragheaderlen > 0xFFFF) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
return -EMSGSIZE;
}
/*
* Start at the end of the frame by handling the remainder.
*/
offset = length - (length % (maxfraglen - fragheaderlen));
/*
* Amount of memory to allocate for final fragment.
*/
fraglen = length - offset + fragheaderlen;
if (length-offset==0) {
fraglen = maxfraglen;
offset -= maxfraglen-fragheaderlen;
}
/*
* The last fragment will not have MF (more fragments) set.
*/
mf = 0;
/*
* Don't fragment packets for path mtu discovery.
*/
if (offset > 0 && inet->pmtudisc == IP_PMTUDISC_DO) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
/*
* Give the upper layer a chance to decide whether to use HW
* checksumming or not.
*/
if (offset == 0 && rt->u.dst.dev->features & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM))
csumselect = CHECKSUM_HW;
/*
* Begin outputting the bytes.
*/
id = inet->id++;
do {
char *data;
struct sk_buff * skb;
/*
* Get the memory we require with some space left for alignment.
*/
if (!(flags & MSG_DONTWAIT) || nfrags == 0) {
skb = sock_alloc_send_skb(sk, fraglen + hh_len + 15,
(flags & MSG_DONTWAIT), &err);
} else {
/* On a non-blocking write, we check for send buffer
* usage on the first fragment only.
*/
skb = sock_wmalloc(sk, fraglen + hh_len + 15, 1,
sk->allocation);
if (!skb)
err = -ENOBUFS;
}
if (skb == NULL)
goto error;
/*
* Fill in the control structures
*/
skb->priority = sk->priority;
skb->dst = dst_clone(&rt->u.dst);
skb->ip_summed = csumselect;
skb_reserve(skb, hh_len);
/*
* Find where to start putting bytes.
*/
data = skb_put(skb, fraglen);
skb->nh.iph = (struct iphdr *)data;
/*
* Only write IP header onto non-raw packets
*/
{
struct iphdr *iph = (struct iphdr *)data;
iph->version = 4;
iph->ihl = 5;
if (opt) {
iph->ihl += opt->optlen>>2;
ip_options_build(skb, opt,
ipc->addr, rt, offset);
}
iph->tos = inet->tos;
iph->tot_len = htons(fraglen - fragheaderlen + iph->ihl*4);
iph->frag_off = htons(offset>>3)|mf|df;
iph->id = id;
if (!mf) {
if (offset || !df) {
/* Select an unpredictable ident only
* for packets without DF or having
* been fragmented.
*/
__ip_select_ident(iph, &rt->u.dst, 0);
id = iph->id;
}
/*
* Any further fragments will have MF set.
*/
mf = htons(IP_MF);
}
if (rt->rt_type == RTN_MULTICAST)
iph->ttl = inet->mc_ttl;
else
iph->ttl = inet->ttl;
iph->protocol = sk->protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
ip_send_check(iph);
data += iph->ihl*4;
skb->h.raw = data;
}
/*
* User data callback
*/
if (getfrag(frag, data, offset, fraglen-fragheaderlen, skb)) {
err = -EFAULT;
kfree_skb(skb);
goto error;
}
offset -= (maxfraglen-fragheaderlen);
fraglen = maxfraglen;
nfrags++;
err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
skb->dst->dev, dst_output);
if (err) {
if (err > 0)
err = inet->recverr ? net_xmit_errno(err) : 0;
if (err)
goto error;
}
} while (offset >= 0);
if (nfrags>1)
ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags;
out:
return 0;
error:
IP_INC_STATS(IpOutDiscards);
if (nfrags>1)
ip_statistics[smp_processor_id()*2 + !in_softirq()].IpFragCreates += nfrags;
return err;
}
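The offset arithmetic above is easier to follow with concrete numbers. The standalone program below (illustration only, not kernel code) reproduces the maxfraglen/offset computation for an assumed 1500-byte MTU, a 20-byte header with no IP options, and 3000 bytes of transport payload, and prints the fragments in the same highest-offset-first order described by the comment at the top of ip_build_xmit_slow().

#include <stdio.h>

int main(void)
{
	int mtu = 1500;			/* assumed path MTU */
	int fragheaderlen = 20;		/* sizeof(struct iphdr), no IP options */
	/* largest on-wire fragment: header plus payload rounded down to 8 bytes */
	int maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	int length = 3000;		/* transport payload to send */
	int offset, fraglen;

	/* start at the final (highest-offset) fragment, then walk back to 0 */
	offset = length - (length % (maxfraglen - fragheaderlen));
	fraglen = length - offset + fragheaderlen;
	if (length - offset == 0) {
		fraglen = maxfraglen;
		offset -= maxfraglen - fragheaderlen;
	}

	do {
		printf("fragment: offset=%d, on-wire size=%d\n", offset, fraglen);
		offset -= maxfraglen - fragheaderlen;
		fraglen = maxfraglen;
	} while (offset >= 0);

	return 0;
}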
/*
* Fast path for unfragmented packets.
*/
int ip_build_xmit(struct sock *sk,
int getfrag (const void *,
char *,
unsigned int,
unsigned int,
struct sk_buff *),
const void *frag,
unsigned length,
struct ipcm_cookie *ipc,
struct rtable *rt,
int flags)
{
struct inet_opt *inet = inet_sk(sk);
int err;
struct sk_buff *skb;
int df;
struct iphdr *iph;
/*
* Try the simple case first. This leaves fragmented frames, and by
* choice RAW frames within 20 bytes of maximum size(rare) to the long path
*/
if (!inet->hdrincl) {
length += sizeof(struct iphdr);
/*
* Check for slow path.
*/
if (length > rt->u.dst.pmtu || ipc->opt != NULL)
return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
} else {
if (length > rt->u.dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
rt->u.dst.dev->mtu);
return -EMSGSIZE;
}
}
if (flags&MSG_PROBE)
goto out;
/*
* Do path mtu discovery if needed.
*/
df = 0;
if (ip_dont_fragment(sk, &rt->u.dst))
df = htons(IP_DF);
/*
* Fast path for unfragmented frames without options.
*/
{
int hh_len = (rt->u.dst.dev->hard_header_len&~15) + 16;
skb = sock_alloc_send_skb(sk, length+hh_len+15,
flags&MSG_DONTWAIT, &err);
if(skb==NULL)
goto error;
skb_reserve(skb, hh_len);
}
skb->priority = sk->priority;
skb->dst = dst_clone(&rt->u.dst);
skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
/*
* Give the upper layer a chance to decide whether to use HW
* checksumming or not.
*/
if (rt->u.dst.dev->features & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM))
skb->ip_summed = CHECKSUM_HW;
if (!inet->hdrincl) {
iph->version=4;
iph->ihl=5;
iph->tos = inet->tos;
iph->tot_len = htons(length);
iph->frag_off = df;
iph->ttl = inet->mc_ttl;
ip_select_ident(iph, &rt->u.dst, sk);
if (rt->rt_type != RTN_MULTICAST)
iph->ttl = inet->ttl;
iph->protocol=sk->protocol;
iph->saddr=rt->rt_src;
iph->daddr=rt->rt_dst;
ip_send_check(iph);
skb->h.raw = skb->nh.raw + iph->ihl*4;
err = getfrag(frag, ((char *)iph)+iph->ihl*4,0, length-iph->ihl*4, skb);
}
else {
skb->h.raw = skb->nh.raw;
err = getfrag(frag, (void *)iph, 0, length, skb);
}
if (err)
goto error_fault;
err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
if (err > 0)
err = inet->recverr ? net_xmit_errno(err) : 0;
if (err)
goto error;
out:
return 0;
error_fault:
err = -EFAULT;
kfree_skb(skb);
error:
IP_INC_STATS(IpOutDiscards);
return err;
}
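Both functions reserve link-layer headroom with the same expression: hard_header_len rounded down to a multiple of 16, plus 16 (the further "+ 15" in the allocation size is, per the comment above, space left for alignment). A tiny standalone illustration (not kernel code), assuming a 14-byte Ethernet header:

#include <stdio.h>

int main(void)
{
	int hard_header_len = 14;	/* e.g. Ethernet */
	int hh_len = (hard_header_len & ~15) + 16;

	printf("hh_len = %d\n", hh_len);	/* prints 16 */
	return 0;
}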
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
@@ -1082,9 +727,6 @@ int ip_append_data(struct sock *sk,
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

-	if (inet->hdrincl)
-		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;
...
@@ -64,6 +64,7 @@
#include <net/raw.h>
#include <net/inet_common.h>
#include <net/checksum.h>
+#include <linux/netfilter.h>

struct sock *raw_v4_htable[RAWV4_HTABLE_SIZE];
rwlock_t raw_v4_lock = RW_LOCK_UNLOCKED;
@@ -243,59 +244,71 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
	return 0;
}
-struct rawfakehdr
-{
-	struct iovec *iov;
-	u32 saddr;
-	struct dst_entry *dst;
-};
-
-/*
- *	Send a RAW IP packet.
- */
-
-/*
- *	Callback support is trivial for SOCK_RAW
- */
-
-static int raw_getfrag(const void *p, char *to, unsigned int offset,
-		       unsigned int fraglen, struct sk_buff *skb)
-{
-	struct rawfakehdr *rfh = (struct rawfakehdr *) p;
-
-	skb->ip_summed = CHECKSUM_NONE; /* Is there any good place to set it? */
-
-	return memcpy_fromiovecend(to, rfh->iov, offset, fraglen);
-}
-
-/*
- *	IPPROTO_RAW needs extra work.
- */
-
-static int raw_getrawfrag(const void *p, char *to, unsigned int offset,
-			  unsigned int fraglen, struct sk_buff *skb)
-{
-	struct rawfakehdr *rfh = (struct rawfakehdr *) p;
-
-	skb->ip_summed = CHECKSUM_NONE; /* Is there any good place to set it? */
-
-	if (memcpy_fromiovecend(to, rfh->iov, offset, fraglen))
-		return -EFAULT;
-
-	if (!offset) {
-		struct iphdr *iph = (struct iphdr *)to;
-		if (!iph->saddr)
-			iph->saddr = rfh->saddr;
-		iph->check = 0;
-		iph->tot_len = htons(fraglen); /* This is right as you can't
-						  frag RAW packets */
-		/*
-		 *	Deliberate breach of modularity to keep
-		 *	ip_build_xmit clean (well less messy).
-		 */
-		if (!iph->id)
-			ip_select_ident(iph, rfh->dst, NULL);
-		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-	}
-	return 0;
-}
+static int raw_send_hdrinc(struct sock *sk, void *from, int length,
+			   struct rtable *rt,
+			   unsigned int flags)
+{
+	struct inet_opt *inet = inet_sk(sk);
+	int hh_len;
+	struct iphdr *iph;
+	struct sk_buff *skb;
+	int err;
+
+	if (length > rt->u.dst.dev->mtu) {
+		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
+			       rt->u.dst.dev->mtu);
+		return -EMSGSIZE;
+	}
+	if (flags&MSG_PROBE)
+		goto out;
+
+	hh_len = (rt->u.dst.dev->hard_header_len&~15) + 16;
+
+	skb = sock_alloc_send_skb(sk, length+hh_len+15,
+				  flags&MSG_DONTWAIT, &err);
+	if (skb == NULL)
+		goto error;
+	skb_reserve(skb, hh_len);
+
+	skb->priority = sk->priority;
+	skb->dst = dst_clone(&rt->u.dst);
+
+	skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);
+
+	skb->ip_summed = CHECKSUM_NONE;
+	skb->h.raw = skb->nh.raw;
+	err = memcpy_fromiovecend((void *)iph, from, 0, length);
+	if (err)
+		goto error_fault;
+
+	/* We don't modify invalid header */
+	if (length >= sizeof(*iph) && iph->ihl * 4 <= length) {
+		if (!iph->saddr)
+			iph->saddr = rt->rt_src;
+		iph->check = 0;
+		iph->tot_len = htons(length);
+		if (!iph->id)
+			ip_select_ident(iph, &rt->u.dst, NULL);
+		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+	}
+
+	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
+		      dst_output);
+	if (err > 0)
+		err = inet->recverr ? net_xmit_errno(err) : 0;
+	if (err)
+		goto error;
+out:
+	return 0;
+
+error_fault:
+	err = -EFAULT;
+	kfree_skb(skb);
+error:
+	IP_INC_STATS(IpOutDiscards);
+	return err;
+}
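raw_send_hdrinc() keeps the established IP_HDRINCL contract: tot_len and the header checksum are rewritten by the kernel, and a zero saddr or id is filled in, provided the header passes the basic sanity check above. A minimal userspace sketch of that usage follows (illustration only, using a documentation address; an IPPROTO_RAW socket implies hdrincl behaviour and requires raw-socket privileges):

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	if (fd < 0) {
		perror("socket");	/* typically needs CAP_NET_RAW / root */
		return 1;
	}

	unsigned char pkt[sizeof(struct iphdr) + 4] = { 0 };
	struct iphdr *iph = (struct iphdr *)pkt;

	iph->version  = 4;
	iph->ihl      = 5;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_RAW;
	iph->daddr    = inet_addr("192.0.2.1");	/* documentation address */
	/* saddr, id, check and tot_len left at 0: the kernel fills them in */

	struct sockaddr_in dst = { .sin_family = AF_INET };
	dst.sin_addr.s_addr = iph->daddr;

	if (sendto(fd, pkt, sizeof(pkt), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}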
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
@@ -303,10 +316,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
{
	struct inet_opt *inet = inet_sk(sk);
	struct ipcm_cookie ipc;
-	struct rawfakehdr rfh;
	struct rtable *rt = NULL;
	int free = 0;
	u32 daddr;
+	u32 saddr;
	u8 tos;
	int err;
@@ -376,7 +389,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		free = 1;
	}
-	rfh.saddr = ipc.addr;
+	saddr = ipc.addr;
	ipc.addr = daddr;
	if (!ipc.opt)
@@ -402,14 +415,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
-		if (!rfh.saddr)
-			rfh.saddr = inet->mc_addr;
+		if (!saddr)
+			saddr = inet->mc_addr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
-						.saddr = rfh.saddr,
+						.saddr = saddr,
						.tos = tos } },
				    .oif = ipc.oif };
		err = ip_route_output_key(&rt, &fl);
@@ -425,14 +438,22 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		goto do_confirm;
back_from_confirm:
-	rfh.iov = msg->msg_iov;
-	rfh.saddr = rt->rt_src;
-	rfh.dst = &rt->u.dst;
-	if (!ipc.addr)
-		ipc.addr = rt->rt_dst;
-	err = ip_build_xmit(sk, inet->hdrincl ? raw_getrawfrag :
-			    raw_getfrag, &rfh, len, &ipc, rt, msg->msg_flags);
+	if (inet->hdrincl)
+		err = raw_send_hdrinc(sk, msg->msg_iov, len,
+				      rt, msg->msg_flags);
+	else {
+		if (!ipc.addr)
+			ipc.addr = rt->rt_dst;
+		lock_sock(sk);
+		err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
+				     &ipc, rt, msg->msg_flags);
+		if (err)
+			ip_flush_pending_frames(sk);
+		else if (!(msg->msg_flags & MSG_MORE))
+			err = ip_push_pending_frames(sk);
+		release_sock(sk);
+	}
done:
	if (free)
		kfree(ipc.opt);
...
@@ -464,69 +464,6 @@ static unsigned short udp_check(struct udphdr *uh, int len, unsigned long saddr,
	return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base));
}
struct udpfakehdr
{
struct udphdr uh;
u32 saddr;
u32 daddr;
struct iovec *iov;
u32 wcheck;
};
/*
* Copy and checksum a UDP packet from user space into a buffer.
*/
static int udp_getfrag(const void *p, char * to, unsigned int offset, unsigned int fraglen, struct sk_buff *skb)
{
struct udpfakehdr *ufh = (struct udpfakehdr *)p;
if (offset==0) {
if (skb->ip_summed == CHECKSUM_HW) {
skb->csum = offsetof(struct udphdr, check);
ufh->uh.check = ~csum_tcpudp_magic(ufh->saddr, ufh->daddr,
ntohs(ufh->uh.len), IPPROTO_UDP, 0);
memcpy(to, ufh, sizeof(struct udphdr));
return memcpy_fromiovecend(to+sizeof(struct udphdr), ufh->iov, offset,
fraglen-sizeof(struct udphdr));
}
if (csum_partial_copy_fromiovecend(to+sizeof(struct udphdr), ufh->iov, offset,
fraglen-sizeof(struct udphdr), &ufh->wcheck))
return -EFAULT;
ufh->wcheck = csum_partial((char *)ufh, sizeof(struct udphdr),
ufh->wcheck);
ufh->uh.check = csum_tcpudp_magic(ufh->saddr, ufh->daddr,
ntohs(ufh->uh.len),
IPPROTO_UDP, ufh->wcheck);
if (ufh->uh.check == 0)
ufh->uh.check = -1;
memcpy(to, ufh, sizeof(struct udphdr));
return 0;
}
if (csum_partial_copy_fromiovecend(to, ufh->iov, offset-sizeof(struct udphdr),
fraglen, &ufh->wcheck))
return -EFAULT;
return 0;
}
/*
* Copy a UDP packet from user space into a buffer without checksumming.
*/
static int udp_getfrag_nosum(const void *p, char * to, unsigned int offset, unsigned int fraglen, struct sk_buff *skb)
{
struct udpfakehdr *ufh = (struct udpfakehdr *)p;
skb->ip_summed = CHECKSUM_NONE;
if (offset==0) {
memcpy(to, ufh, sizeof(struct udphdr));
return memcpy_fromiovecend(to+sizeof(struct udphdr), ufh->iov, offset,
fraglen-sizeof(struct udphdr));
}
return memcpy_fromiovecend(to, ufh->iov, offset-sizeof(struct udphdr),
fraglen);
}
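What udp_getfrag() above folded into the copy is the standard UDP checksum: a ones'-complement sum over the IPv4 pseudo-header (source, destination, protocol, UDP length) plus the UDP header and payload, with an all-zero result transmitted as 0xffff. The standalone sketch below computes the same thing portably; it is an illustration only, not the kernel's implementation, which uses csum_partial(), csum_partial_copy_fromiovecend() and csum_tcpudp_magic() as shown above.

#include <stddef.h>
#include <stdint.h>

/* Accumulate a 16-bit ones'-complement sum over "len" bytes. */
static uint32_t sum16(const void *data, size_t len, uint32_t sum)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* saddr/daddr are the IPv4 addresses in network byte order; "udp" points
 * at the UDP header (check field zeroed) followed by the payload, and
 * udp_len is 8 plus the payload length. */
uint16_t udp_checksum(const uint8_t saddr[4], const uint8_t daddr[4],
		      const uint8_t *udp, size_t udp_len)
{
	uint32_t sum = 0;
	uint16_t csum;

	sum = sum16(saddr, 4, sum);		/* pseudo-header */
	sum = sum16(daddr, 4, sum);
	sum += 17;				/* IPPROTO_UDP */
	sum += (uint32_t)udp_len;

	sum = sum16(udp, udp_len, sum);		/* UDP header + data */

	while (sum >> 16)			/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);

	csum = (uint16_t)~sum;
	return csum ? csum : 0xffff;		/* 0 would mean "no checksum" */
}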
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		int len)
{
@@ -534,18 +471,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	struct udp_opt *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
-	struct udpfakehdr ufh;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
-	u32 daddr;
+	u32 daddr, faddr, saddr;
+	u16 dport;
	u8 tos;
	int err;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;

	/* This check is ONLY to check for arithmetic overflow
	   on integer(!) len. Not more! Real check will be made
-	   in ip_build_xmit --ANK
+	   in ip_append_* --ANK

	   BTW socket.c -> af_*.c -> ... make multiple
	   invalid conversions size_t -> int. We MUST repair it f.e.
@@ -593,22 +530,21 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
			return -EINVAL;
		}
-		ufh.daddr = usin->sin_addr.s_addr;
-		ufh.uh.dest = usin->sin_port;
-		if (ufh.uh.dest == 0)
+		daddr = usin->sin_addr.s_addr;
+		dport = usin->sin_port;
+		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->state != TCP_ESTABLISHED)
			return -ENOTCONN;
-		ufh.daddr = inet->daddr;
-		ufh.uh.dest = inet->dport;
+		daddr = inet->daddr;
+		dport = inet->dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;
-	ufh.uh.source = inet->sport;

	ipc.oif = sk->bound_dev_if;
	if (msg->msg_controllen) {
@@ -622,13 +558,13 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	if (!ipc.opt)
		ipc.opt = inet->opt;
-	ufh.saddr = ipc.addr;
-	ipc.addr = daddr = ufh.daddr;
+	saddr = ipc.addr;
+	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
-		daddr = ipc.opt->faddr;
+		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
@@ -641,8 +577,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
-		if (!ufh.saddr)
-			ufh.saddr = inet->mc_addr;
+		if (!saddr)
+			saddr = inet->mc_addr;
		connected = 0;
	}
@@ -651,8 +587,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	if (rt == NULL) {
		struct flowi fl = { .nl_u = { .ip4_u =
-					      { .daddr = daddr,
-						.saddr = ufh.saddr,
+					      { .daddr = faddr,
+						.saddr = saddr,
						.tos = tos } },
				    .oif = ipc.oif };
		err = ip_route_output_key(&rt, &fl);
@@ -670,16 +606,10 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		goto do_confirm;
back_from_confirm:
-	ufh.saddr = rt->rt_src;
+	saddr = rt->rt_src;
	if (!ipc.addr)
-		ufh.daddr = ipc.addr = rt->rt_dst;
-	ufh.uh.len = htons(ulen);
-	ufh.uh.check = 0;
-	ufh.iov = msg->msg_iov;
-	ufh.wcheck = 0;
-
-	/* 0x80000000 is temporary hook for testing new output path */
-	if (corkreq || rt->u.dst.header_len || (msg->msg_flags&0x80000000)) {
+		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
@@ -693,23 +623,22 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
	/*
	 *	Now cork the socket to pend data.
	 */
-	up->daddr = ufh.daddr;
-	up->dport = ufh.uh.dest;
-	up->saddr = ufh.saddr;
-	up->sport = ufh.uh.source;
+	up->daddr = daddr;
+	up->dport = dport;
+	up->saddr = saddr;
+	up->sport = inet->sport;
	up->pending = 1;
-	goto do_append_data;
-	}
-
-	/* RFC1122: OK.  Provides the checksumming facility (MUST) as per */
-	/* 4.1.3.4. It's configurable by the application via setsockopt() */
-	/* (MAY) and it defaults to on (MUST). */
-	err = ip_build_xmit(sk,
-			    (sk->no_check == UDP_CSUM_NOXMIT ?
-			     udp_getfrag_nosum :
-			     udp_getfrag),
-			    &ufh, ulen, &ipc, rt, msg->msg_flags);
+
+do_append_data:
+	up->len += ulen;
+	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
+			     sizeof(struct udphdr), &ipc, rt,
+			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
+	if (err)
+		udp_flush_pending_frames(sk);
+	else if (!corkreq)
+		err = udp_push_pending_frames(sk, up);
+	release_sock(sk);
out:
	ip_rt_put(rt);
@@ -727,18 +656,6 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		goto back_from_confirm;
	err = 0;
	goto out;
-
-do_append_data:
-	up->len += ulen;
-	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
-			     sizeof(struct udphdr), &ipc, rt,
-			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
-	if (err)
-		udp_flush_pending_frames(sk);
-	else if (!corkreq)
-		err = udp_push_pending_frames(sk, up);
-	release_sock(sk);
-	goto out;
}

int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags)
...
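With udp_sendmsg() now always going through ip_append_data(), an ordinary UDP socket can accumulate a single datagram across several sends and have it pushed when the cork is released, which is exactly what the corkreq/MSG_MORE handling above implements. A small userspace sketch (illustration only, hypothetical destination):

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	struct sockaddr_in dst = { .sin_family = AF_INET };
	dst.sin_port = htons(9);			/* discard service, example only */
	dst.sin_addr.s_addr = inet_addr("192.0.2.1");	/* documentation address */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}

	/* MSG_MORE keeps the socket corked: the first send only appends via
	 * ip_append_data(); the second, without the flag, lets
	 * udp_push_pending_frames() emit one 11-byte datagram. */
	send(fd, "hello ", 6, MSG_MORE);
	send(fd, "world", 5, 0);

	close(fd);
	return 0;
}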