Commit f545a38f authored by Eric Dumazet, committed by David S. Miller

net: add a limit parameter to sk_add_backlog()

sk_add_backlog() & sk_rcvqueues_full() hard coded sk_rcvbuf as the
memory limit. We need to make this limit a parameter for TCP use.

No functional change is expected from this patch; all callers still use the
old sk_rcvbuf limit.
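For illustration only, a hypothetical later caller could pass a limit other
than sk->sk_rcvbuf; the value below (receive plus send buffer space) is an
example, not something this patch introduces:

	/* Hypothetical TCP receive path: let the backlog grow up to the
	 * combined receive+send buffer space.  Callers converted by this
	 * patch keep the old behaviour by passing sk->sk_rcvbuf.
	 */
	if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}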
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Rick Jones <rick.jones2@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b9898507
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,17 +709,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+				     unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize > sk->sk_rcvbuf;
+	return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -389,7 +389,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 
 	skb->dev = NULL;
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
 	}
@@ -406,7 +406,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1752,7 +1752,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1479,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
 
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
 		goto drop;
 
 	rc = 0;
@@ -1488,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1654,7 +1654,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -611,14 +611,14 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
 		sk = stack[i];
 		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb1)) {
+			if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				goto drop;
 			}
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog(sk, skb1)) {
+			else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
 				kfree_skb(skb1);
 				bh_unlock_sock(sk);
 				goto drop;
@@ -790,14 +790,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
 	/* deliver */
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		sock_put(sk);
 		goto discard;
 	}
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		bh_unlock_sock(sk);
 		sock_put(sk);
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
 	else {
 		dprintk("%s: adding to backlog...\n", __func__);
 		llc_set_backlog_type(skb, LLC_PACKET);
-		if (sk_add_backlog(sk, skb))
+		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 			goto drop_unlock;
 	}
 out:
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		sctp_bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
-			if (sk_add_backlog(sk, skb))
+			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 				sctp_chunk_free(chunk);
 			else
 				backloged = 1;
@@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
-	ret = sk_add_backlog(sk, skb);
+	ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 	if (!ret) {
 		/* Hold the assoc/ep while hanging on the backlog queue.
 		 * This way, we know structures we need will not disappear
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1330,7 +1330,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog(sk, buf))
+		if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
 		if (!sock_owned_by_user(sk)) {
 			queued = x25_process_rx_frame(sk, skb);
 		} else {
-			queued = !sk_add_backlog(sk, skb);
+			queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 		}
 		bh_unlock_sock(sk);
 		sock_put(sk);