Commit 400dfd3a authored by Eric Dumazet, committed by David S. Miller

net: refactor sk_page_frag_refill()

While working on a new virtio_net allocation strategy to increase the
payload/truesize ratio, we found that sk_page_frag_refill() needed
refactoring.

This patch splits sk_page_frag_refill() into two parts, adding
skb_page_frag_refill() which can be used without a socket.

While we are at it, add a minimum frag size of 32 bytes for
sk_page_frag_refill().

Michael will either use netdev_alloc_frag() from softirq context,
or skb_page_frag_refill() from process context in refill_work()
(GFP_KERNEL allocations).
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Michael Dalton <mwdalton@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent baf785ba
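
To illustrate the intended split, here is a minimal, hypothetical sketch (not part of the patch) of a process-context refill path in the spirit of virtio_net's refill_work(): it calls the new skb_page_frag_refill() with GFP_KERNEL and then carves a buffer out of the page_frag. The helper name refill_buffers() and the len parameter are made up for the example; only skb_page_frag_refill() and struct page_frag come from this patch.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Hypothetical process-context refill path (sketch, not from the patch). */
static bool refill_buffers(struct page_frag *pfrag, unsigned int len)
{
	void *buf;

	/* May sleep: GFP_KERNEL implies __GFP_WAIT, so the allocator tries
	 * high-order pages before falling back to order-0.
	 */
	if (!skb_page_frag_refill(len, pfrag, GFP_KERNEL))
		return false;

	buf = page_address(pfrag->page) + pfrag->offset;
	get_page(pfrag->page);		/* keep a reference for this buffer */
	pfrag->offset += len;		/* consume len bytes of the fragment */

	/* ... hand 'buf' to the device here ... */
	return buf != NULL;
}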
include/linux/skbuff.h
@@ -2062,6 +2062,8 @@ static inline void skb_frag_set_page(struct sk_buff *skb, int f,
 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
 }
 
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
+
 /**
  * skb_frag_dma_map - maps a paged fragment via the DMA API
  * @dev: the device to map the fragment to
net/core/sock.c
@@ -1847,7 +1847,17 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
 
-bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+/**
+ * skb_page_frag_refill - check that a page_frag contains enough room
+ * @sz: minimum size of the fragment we want to get
+ * @pfrag: pointer to page_frag
+ * @prio: priority for memory allocation
+ *
+ * Note: While this allocator tries to use high order pages, there is
+ * no guarantee that allocations succeed. Therefore, @sz MUST be
+ * less or equal than PAGE_SIZE.
+ */
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 {
 	int order;
 
@@ -1856,16 +1866,16 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
 			pfrag->offset = 0;
 			return true;
 		}
-		if (pfrag->offset < pfrag->size)
+		if (pfrag->offset + sz <= pfrag->size)
 			return true;
 		put_page(pfrag->page);
 	}
 
 	/* We restrict high order allocations to users that can afford to wait */
-	order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+	order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
 
 	do {
-		gfp_t gfp = sk->sk_allocation;
+		gfp_t gfp = prio;
 
 		if (order)
 			gfp |= __GFP_COMP | __GFP_NOWARN;
@@ -1877,6 +1887,15 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
 		}
 	} while (--order >= 0);
 
+	return false;
+}
+EXPORT_SYMBOL(skb_page_frag_refill);
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
+		return true;
+
 	sk_enter_memory_pressure(sk);
 	sk_stream_moderate_sndbuf(sk);
 	return false;
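
For comparison, here is a caller-side sketch of the retained socket wrapper, modeled on the usual socket send-path pattern rather than copied from this patch. The function name copy_into_frag() and the from/copy parameters are illustrative; sk_page_frag(), sk_page_frag_refill(), and skb_copy_to_page_nocache() are existing kernel helpers.

#include <linux/kernel.h>
#include <net/sock.h>

/* Sketch (not from the patch): a send path copying user data into the
 * socket's page_frag after asking the wrapper for room.
 */
static int copy_into_frag(struct sock *sk, struct sk_buff *skb,
			  char __user *from, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int err;

	/* On failure the wrapper has already signalled memory pressure and
	 * moderated the send buffer; the caller should wait and retry.
	 */
	if (!sk_page_frag_refill(sk, pfrag))
		return -EAGAIN;

	/* At least 32 bytes of room are available on success. */
	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	err = skb_copy_to_page_nocache(sk, from, skb, pfrag->page,
				       pfrag->offset, copy);
	if (err)
		return err;

	pfrag->offset += copy;
	return copy;
}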