Commit 3247e3ff authored by David S. Miller

Merge branch 'tcp_stream_alloc_skb'

Eric Dumazet says:

====================
tcp: tcp_stream_alloc_skb() changes

sk_stream_alloc_skb() is only used by TCP.

Rename it to tcp_stream_alloc_skb() and apply small
optimizations.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d18785e2 c4322884
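
What the series does, at a glance: the prototype moves out of the generic include/net/sock.h and into the TCP-private include/net/tcp.h, and because the helper is now known to be TCP-only, the two loads of sk->sk_prot->max_header become the compile-time constant MAX_TCP_HEADER and the 32-bit size rounding is dropped. A before/after digest of the allocation path (illustrative only; the hunks below are authoritative):

	/* Before: generic stream helper; headroom comes from a
	 * per-protocol indirect lookup, payload size is rounded up.
	 */
	size = ALIGN(size, 4);
	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	...
	skb_reserve(skb, sk->sk_prot->max_header);

	/* After: TCP-only helper; headroom is a compile-time constant
	 * and the ALIGN() rounding is gone.
	 */
	skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
	...
	skb_reserve(skb, MAX_TCP_HEADER);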
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2422,9 +2422,6 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
-				    bool force_schedule);
-
 /**
  * sk_page_frag - return an appropriate page_frag
  * @sk: socket
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -337,6 +337,8 @@ void tcp_twsk_destructor(struct sock *sk);
 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 			struct pipe_inode_info *pipe, size_t len,
 			unsigned int flags);
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+				     bool force_schedule);
 
 void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
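
For context, a minimal usage sketch of the renamed helper (hypothetical caller; example_queue_payload() and the queueing step are illustrative, not part of this commit). The helper returns an skb with MAX_TCP_HEADER of headroom already reserved and exactly size bytes of payload room, and force_schedule forces the memory charge to succeed, as the call sites below do when the rtx and write queues are empty:

	/* Hypothetical caller sketch, mirroring the call sites updated
	 * in net/ipv4/tcp.c below.
	 */
	static int example_queue_payload(struct sock *sk, int payload_len)
	{
		struct sk_buff *skb;

		skb = tcp_stream_alloc_skb(sk, payload_len, sk->sk_allocation,
					   tcp_rtx_and_write_queues_empty(sk));
		if (!skb)
			return -ENOBUFS;	/* caller waits for memory */

		/* ... copy payload, set TCP_SKB_CB(skb) fields ... */
		tcp_skb_entail(sk, skb);	/* assumed queueing helper */
		return 0;
	}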
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -856,18 +856,15 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 }
 EXPORT_SYMBOL(tcp_splice_read);
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 				    bool force_schedule)
 {
 	struct sk_buff *skb;
 
-	/* The TCP header must be at least 32-bit aligned. */
-	size = ALIGN(size, 4);
-
 	if (unlikely(tcp_under_memory_pressure(sk)))
 		sk_mem_reclaim_partial(sk);
 
-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+	skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
 	if (likely(skb)) {
 		bool mem_scheduled;
 
@@ -878,7 +875,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
 		}
 		if (likely(mem_scheduled)) {
-			skb_reserve(skb, sk->sk_prot->max_header);
+			skb_reserve(skb, MAX_TCP_HEADER);
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
@@ -960,7 +957,7 @@ static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
 	if (!sk_stream_memory_free(sk))
 		return NULL;
 
-	skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
+	skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
 				  tcp_rtx_and_write_queues_empty(sk));
 	if (!skb)
 		return NULL;
@@ -1289,7 +1286,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 				goto restart;
 			}
 			first_skb = tcp_rtx_and_write_queues_empty(sk);
-			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
+			skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
 						  first_skb);
 			if (!skb)
 				goto wait_for_space;
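
Stitching the tcp.c hunks together, the resulting allocator reads roughly as below. The branches elided by the @@ jumps (the force_schedule path and the failure handling) are reconstructed from nearby mainline code and should be treated as an approximation, not a quote of this tree:

	struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
					     bool force_schedule)
	{
		struct sk_buff *skb;

		if (unlikely(tcp_under_memory_pressure(sk)))
			sk_mem_reclaim_partial(sk);

		skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
		if (likely(skb)) {
			bool mem_scheduled;

			if (force_schedule) {
				/* reconstructed: charge unconditionally */
				mem_scheduled = true;
				sk_forced_mem_schedule(sk, skb->truesize);
			} else {
				mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
			}
			if (likely(mem_scheduled)) {
				skb_reserve(skb, MAX_TCP_HEADER);
				/* exactly size bytes available to the
				 * caller, no more, no less.
				 */
				skb->reserved_tailroom = skb->end - skb->tail - size;
				INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
				return skb;
			}
			__kfree_skb(skb);
		} else {
			/* reconstructed failure path */
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
		}
		return NULL;
	}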
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1564,7 +1564,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on. */
-	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
+	buff = tcp_stream_alloc_skb(sk, nsize, gfp, true);
 	if (!buff)
 		return -ENOMEM; /* We'll just try again later. */
 	skb_copy_decrypted(buff, skb);
@@ -2121,7 +2121,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
 				    skb, len, mss_now, gfp);
 
-	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
+	buff = tcp_stream_alloc_skb(sk, 0, gfp, true);
 	if (unlikely(!buff))
 		return -ENOMEM;
 	skb_copy_decrypted(buff, skb);
@@ -2388,7 +2388,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		return -1;
 
 	/* We're allowed to probe. Build it now. */
-	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
+	nskb = tcp_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
 	if (!nskb)
 		return -1;
 	sk_wmem_queued_add(sk, nskb->truesize);
@@ -3754,7 +3754,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 	/* limit to order-0 allocations */
 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
 
-	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
+	syn_data = tcp_stream_alloc_skb(sk, space, sk->sk_allocation, false);
 	if (!syn_data)
 		goto fallback;
 	syn_data->ip_summed = CHECKSUM_PARTIAL;
@@ -3835,7 +3835,7 @@ int tcp_connect(struct sock *sk)
 		return 0;
 	}
 
-	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
+	buff = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
 	if (unlikely(!buff))
 		return -ENOBUFS;
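
A detail worth noting in the tcp_send_syn_data() hunk: the payload was already capped at SKB_MAX_HEAD(MAX_TCP_HEADER), the largest payload that fits in an order-0 page after MAX_TCP_HEADER of headroom and the struct skb_shared_info tail (per the mainline definition of SKB_MAX_HEAD; stated here as an assumption). That cap lines up with the headroom the helper reserves (previously via sk->sk_prot->max_header, which tcp_prot sets to MAX_TCP_HEADER; now directly), so the SYN-data skb keeps the order-0 behavior the existing comment asks for:

	/* space never exceeds what one page can hold alongside
	 * MAX_TCP_HEADER of headroom, so the data buffer allocated
	 * inside tcp_stream_alloc_skb() remains order-0.
	 */
	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
	syn_data = tcp_stream_alloc_skb(sk, space, sk->sk_allocation, false);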