Commit 3d153a7c authored by Andy Fleming, committed by David S. Miller

net: Allow skb_recycle_check to be done in stages

skb_recycle_check resets the skb if it's eligible for recycling.
However, there are times when a driver might want to manipulate the
skb data before resetting the skb, but after it has determined
eligibility.  We do this by splitting the eligibility check from the
skb reset, creating two functions to accomplish that task.
Signed-off-by: Andy Fleming <afleming@freescale.com>
Acked-by: David Daney <david.daney@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1e5c22cd
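For context, below is a minimal, hypothetical sketch of the receive-refill pattern this split enables: the driver checks eligibility, works with the old buffer contents while they are still intact, and only then resets the skb. The helper rx_reuse_or_alloc, its buf_size parameter, and the prefetch step are illustrative assumptions, not code from this commit.

#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/skbuff.h>

/*
 * Hypothetical driver helper (not part of this commit): with the split
 * interface the driver can check eligibility first, touch the old data
 * while it is still intact, and only then reset the skb for reuse.
 */
static struct sk_buff *rx_reuse_or_alloc(struct net_device *dev,
                                         struct sk_buff *skb,
                                         unsigned int buf_size)
{
        if (skb && skb_is_recycleable(skb, buf_size)) {
                /* Old contents are untouched here, so the driver may
                 * still inspect or prefetch them before the wipe. */
                prefetch(skb->data);

                /* Clean the skb up as if it came from __alloc_skb(). */
                skb_recycle(skb);
                return skb;
        }

        /* Not recyclable: fall back to a fresh allocation. */
        return netdev_alloc_skb(dev, buf_size);
}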
include/linux/skbuff.h
@@ -550,6 +550,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
 }
 
+extern void skb_recycle(struct sk_buff *skb);
 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
@@ -2484,5 +2485,25 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
+static inline bool skb_is_recycleable(struct sk_buff *skb, int skb_size)
+{
+	if (irqs_disabled())
+		return false;
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+		return false;
+
+	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+		return false;
+
+	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+	if (skb_end_pointer(skb) - skb->head < skb_size)
+		return false;
+
+	if (skb_shared(skb) || skb_cloned(skb))
+		return false;
+
+	return true;
+}
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
net/core/skbuff.c
@@ -485,37 +485,17 @@ void consume_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(consume_skb);
 
 /**
- * skb_recycle_check - check if skb can be reused for receive
+ * skb_recycle - clean up an skb for reuse
  * @skb: buffer
- * @skb_size: minimum receive buffer size
  *
- * Checks that the skb passed in is not shared or cloned, and
- * that it is linear and its head portion at least as large as
- * skb_size so that it can be recycled as a receive buffer.
- * If these conditions are met, this function does any necessary
- * reference count dropping and cleans up the skbuff as if it
- * just came from __alloc_skb().
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
  */
-bool skb_recycle_check(struct sk_buff *skb, int skb_size)
+void skb_recycle(struct sk_buff *skb)
 {
 	struct skb_shared_info *shinfo;
 
-	if (irqs_disabled())
-		return false;
-
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-		return false;
-
-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return false;
-
-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return false;
-
-	if (skb_shared(skb) || skb_cloned(skb))
-		return false;
-
 	skb_release_head_state(skb);
 
 	shinfo = skb_shinfo(skb);
@@ -525,6 +505,27 @@ bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->data = skb->head + NET_SKB_PAD;
 	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
+ * skb_recycle_check - check if skb can be reused for receive
+ * @skb: buffer
+ * @skb_size: minimum receive buffer size
+ *
+ * Checks that the skb passed in is not shared or cloned, and
+ * that it is linear and its head portion at least as large as
+ * skb_size so that it can be recycled as a receive buffer.
+ * If these conditions are met, this function does any necessary
+ * reference count dropping and cleans up the skbuff as if it
+ * just came from __alloc_skb().
+ */
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+	if (!skb_is_recycleable(skb, skb_size))
+		return false;
+
+	skb_recycle(skb);
 
 	return true;
 }
...