Commit b1faf566 authored by Eric Dumazet, committed by David S. Miller

net: sock_queue_err_skb() dont mess with sk_forward_alloc

Correct sk_forward_alloc handling for the error queue would require either a
backlog of frames that the softirq handler could not deliver because the
socket is owned by a user thread, or extending backlog processing to handle
error packets as well as normal ones.

Another possibility is to not mem charge the error queue at all; this is what
this patch implements.

Note: this reverts commit 29030374
(net: fix sk_forward_alloc corruptions), since we no longer need to lock the
socket.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bc284f94
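The patch drops the mem charge entirely: error skbs are accounted only in
sk->sk_rmem_alloc, an atomic counter, and a dedicated skb destructor releases
that charge when the skb is freed; sk_forward_alloc, which is consistent only
under the socket lock, is never touched. A minimal userspace sketch of that
pattern follows (stand-in names such as fake_sock and queue_err_skb are
illustrative, not kernel code):

/*
 * Illustrative sketch, not part of this patch: a self-contained userspace
 * model of the new accounting.  Build with: cc -std=c11 model.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_sock {			/* stand-in for struct sock */
	atomic_int rmem_alloc;		/* models sk->sk_rmem_alloc */
	int rcvbuf;			/* models sk->sk_rcvbuf */
};

struct fake_skb {			/* stand-in for struct sk_buff */
	struct fake_sock *sk;
	int truesize;
	void (*destructor)(struct fake_skb *skb);
};

/* models sock_rmem_free(): only the atomic counter is touched */
static void rmem_free(struct fake_skb *skb)
{
	atomic_fetch_sub(&skb->sk->rmem_alloc, skb->truesize);
}

/* models sock_queue_err_skb(): charge rmem, set the destructor; no
 * sk_forward_alloc (and therefore no socket lock) is involved */
static int queue_err_skb(struct fake_sock *sk, struct fake_skb *skb)
{
	if (atomic_load(&sk->rmem_alloc) + skb->truesize >= sk->rcvbuf)
		return -1;		/* -ENOMEM in the kernel */
	skb->sk = sk;
	skb->destructor = rmem_free;
	atomic_fetch_add(&sk->rmem_alloc, skb->truesize);
	/* the kernel would now skb_queue_tail() and call sk_data_ready() */
	return 0;
}

/* models kfree_skb(): the destructor releases the charge */
static void free_skb(struct fake_skb *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	free(skb);
}

int main(void)
{
	struct fake_sock sk = { .rcvbuf = 4096 };
	struct fake_skb *skb = calloc(1, sizeof(*skb));

	if (!skb)
		return 1;
	atomic_init(&sk.rmem_alloc, 0);
	skb->truesize = 512;
	if (queue_err_skb(&sk, skb) == 0)
		printf("charged:  rmem_alloc=%d\n", atomic_load(&sk.rmem_alloc));
	free_skb(skb);
	printf("released: rmem_alloc=%d\n", atomic_load(&sk.rmem_alloc));
	return 0;
}

Because only one atomic counter is involved, softirq producers such as ICMP
error handling and TX timestamping can queue error skbs without
bh_lock_sock(), which is why the locking added by 29030374 is reverted in the
hunks below.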
include/net/sock.h
@@ -1524,20 +1524,7 @@ extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
 
 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
-{
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
-		return -ENOMEM;
-	skb_set_owner_r(skb, sk);
-	skb_queue_tail(&sk->sk_error_queue, skb);
-	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
-	return 0;
-}
+extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  *	Recover an error report and clear atomically
net/core/skbuff.c
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
+		return -ENOMEM;
+
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rmem_free;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+	skb_queue_tail(&sk->sk_error_queue, skb);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb->len);
+	return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2997,9 +3025,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
 
-	bh_lock_sock(sk);
 	err = sock_queue_err_skb(sk, skb);
-	bh_unlock_sock(sk);
 
 	if (err)
 		kfree_skb(skb);
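skb_tstamp_tx() above is one producer for this queue: transmit timestamps
reach applications that enabled SO_TIMESTAMPING and then read the error queue
with recvmsg(MSG_ERRQUEUE). A rough userspace sketch of that consumer side
(illustrative only; whether a software TX timestamp is actually generated
depends on kernel and driver support, and the guarded defines cover libcs
that lack the constants):

/* Illustrative sketch, not part of this patch: read a TX timestamp back from
 * the socket error queue that sock_queue_err_skb() fills. */
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* flags */
#include <linux/errqueue.h>	/* struct scm_timestamping */

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* value used on most architectures */
#endif
#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int flags = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");

	/* ... connect()/sendto() a datagram here, then poll() for POLLERR ... */

	char data[256], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) >= 0) {
		struct cmsghdr *c;

		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
			if (c->cmsg_level == SOL_SOCKET &&
			    c->cmsg_type == SCM_TIMESTAMPING) {
				struct scm_timestamping ts;

				memcpy(&ts, CMSG_DATA(c), sizeof(ts));
				printf("tx timestamp: %ld.%09ld\n",
				       (long)ts.ts[0].tv_sec,
				       (long)ts.ts[0].tv_nsec);
			}
		}
	}
	return 0;
}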
net/ipv4/udp.c
@@ -633,11 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	if (!inet->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
-	} else {
-		bh_lock_sock(sk);
+	} else
 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-		bh_unlock_sock(sk);
-	}
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
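ip_icmp_error() only queues the error when the application has enabled
IP_RECVERR, and the queued error is read back through the same
recvmsg(MSG_ERRQUEUE) path. A rough userspace sketch of that consumer
(illustrative only, not part of the patch):

/* Illustrative sketch, not part of this patch: receive an ICMP error (e.g.
 * port unreachable) queued by ip_icmp_error() via the UDP socket's error
 * queue. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/errqueue.h>	/* struct sock_extended_err, SO_EE_ORIGIN_* */

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

	/* ... sendto() a datagram that draws an ICMP error, wait for POLLERR ... */

	char data[256], ctrl[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) >= 0) {
		struct cmsghdr *c;

		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
			if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_RECVERR) {
				struct sock_extended_err ee;

				memcpy(&ee, CMSG_DATA(c), sizeof(ee));
				printf("ee_errno=%u ee_origin=%u type=%u code=%u\n",
				       ee.ee_errno, (unsigned)ee.ee_origin,
				       (unsigned)ee.ee_type, (unsigned)ee.ee_code);
			}
		}
	}
	return 0;
}

The IPv6 hunk below makes the same change around ipv6_icmp_error().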
net/ipv6/udp.c
@@ -466,11 +466,9 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
 		goto out;
 
-	if (np->recverr) {
-		bh_lock_sock(sk);
+	if (np->recverr)
 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
-		bh_unlock_sock(sk);
-	}
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out: