Commit 8f78d753 authored by Ian Pratt, committed by David S. Miller

[NET]: Add alloc_skb_from_cache.

This serves two purposes: firstly, we like to allocate page-sized skbs,
as this gives us zero-copy transfer of network buffers between guest
operating systems. Secondly, it enables us to keep a cache of pages
that have been used for network buffers and to be more lax about
scrubbing them when they change VM ownership (since their contents
could have been sniffed on the wire anyway).
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fcf4a436
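
For context, here is a minimal usage sketch of the new helper. The cache name netbuf_pages and the init/alloc wrappers are illustrative assumptions, not part of this commit; the sketch assumes the six-argument 2.6-era kmem_cache_create() and the SKB_MAX_HEAD() sizing macro from skbuff.h.

/* Hypothetical usage sketch (not in this commit): a driver keeps a
 * cache of page-sized data areas and allocates skbs whose payload
 * comes from that cache, so whole pages can be remapped rather than
 * copied between guests.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

static kmem_cache_t *netbuf_pages;	/* assumed cache name */

static int __init netbuf_init(void)
{
	/* PAGE_SIZE objects: big enough for SKB_MAX_HEAD(0) bytes of
	 * payload plus the trailing struct skb_shared_info. */
	netbuf_pages = kmem_cache_create("netbuf_pages", PAGE_SIZE, 0,
					 SLAB_HWCACHE_ALIGN, NULL, NULL);
	return netbuf_pages ? 0 : -ENOMEM;
}

static struct sk_buff *netbuf_alloc(void)
{
	/* SKB_MAX_HEAD(0) is the largest payload that still leaves
	 * room for skb_shared_info in a single page. */
	return alloc_skb_from_cache(netbuf_pages, SKB_MAX_HEAD(0),
				    GFP_ATOMIC);
}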
include/linux/skbuff.h:
@@ -292,6 +292,8 @@ struct sk_buff {
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+					    unsigned int size, int priority);
 extern void	       kfree_skbmem(struct sk_buff *skb);
 extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
@@ -935,6 +937,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *
  *	%NULL is returned if there is no free memory.
  */
+#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					       int gfp_mask)
 {
@@ -943,6 +946,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
	skb_reserve(skb, 16);
	return skb;
 }
+#else
+extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
+#endif
 /**
  *	dev_alloc_skb - allocate an skbuff for sending
 ...
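
The #ifndef above lets an architecture opt out of the generic inline and supply its own __dev_alloc_skb(). A minimal sketch of such an override follows; skbuff_cachep is an assumed arch-private cache, not something this commit defines.

/* Hypothetical arch override (sketch only). With
 * CONFIG_HAVE_ARCH_DEV_ALLOC_SKB set, the generic inline is compiled
 * out and the architecture provides this symbol instead, here drawing
 * data areas from a page cache via the new helper.
 */
struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
{
	struct sk_buff *skb;

	/* skbuff_cachep: assumed arch-private kmem cache whose objects
	 * are large enough for length + 16 bytes plus skb overheads. */
	skb = alloc_skb_from_cache(skbuff_cachep, length + 16, gfp_mask);
	if (skb)
		skb_reserve(skb, 16);	/* same 16-byte headroom as the
					 * generic version */
	return skb;
}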
net/core/skbuff.c:
@@ -163,6 +163,59 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
	goto out;
 }
+/**
+ *	alloc_skb_from_cache	-	allocate a network buffer
+ *	@cp: kmem_cache from which to allocate the data area
+ *	     (object size must be big enough for @size bytes + skb overheads)
+ *	@size: size to allocate
+ *	@gfp_mask: allocation mask
+ *
+ *	Allocate a new &sk_buff. The returned buffer has no headroom and
+ *	tail room of size bytes. The object has a reference count of one.
+ *	The return is the buffer. On a failure the return is %NULL.
+ *
+ *	Buffers may only be allocated from interrupts using a @gfp_mask of
+ *	%GFP_ATOMIC.
+ */
+struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+				     unsigned int size, int gfp_mask)
+{
+	struct sk_buff *skb;
+	u8 *data;
+
+	/* Get the HEAD */
+	skb = kmem_cache_alloc(skbuff_head_cache,
+			       gfp_mask & ~__GFP_DMA);
+	if (!skb)
+		goto out;
+
+	/* Get the DATA. */
+	size = SKB_DATA_ALIGN(size);
+	data = kmem_cache_alloc(cp, gfp_mask);
+	if (!data)
+		goto nodata;
+
+	memset(skb, 0, offsetof(struct sk_buff, truesize));
+	skb->truesize = size + sizeof(struct sk_buff);
+	atomic_set(&skb->users, 1);
+	skb->head = data;
+	skb->data = data;
+	skb->tail = data;
+	skb->end  = data + size;
+
+	atomic_set(&(skb_shinfo(skb)->dataref), 1);
+	skb_shinfo(skb)->nr_frags = 0;
+	skb_shinfo(skb)->tso_size = 0;
+	skb_shinfo(skb)->tso_segs = 0;
+	skb_shinfo(skb)->frag_list = NULL;
+out:
+	return skb;
+nodata:
+	kmem_cache_free(skbuff_head_cache, skb);
+	skb = NULL;
+	goto out;
+}
+
 static void skb_drop_fraglist(struct sk_buff *skb)
 {
 ...
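
The function mirrors alloc_skb(): the skb head still comes from skbuff_head_cache (with __GFP_DMA masked off, since the head itself never needs DMA memory), while the data area comes from the caller's cache, with struct skb_shared_info placed at skb->end. That is the "skb overheads" constraint the docstring mentions. A hedged helper to check it at cache-setup time; netbuf_cache_fits is hypothetical, assuming the kmem_cache_size() accessor that reports a cache's object size.

/* Hypothetical sanity check (not in this commit): a cache object must
 * hold the aligned payload plus the trailing skb_shared_info, because
 * alloc_skb_from_cache() sets skb->end = data + SKB_DATA_ALIGN(size)
 * and skb_shinfo() lives at skb->end.
 */
static inline int netbuf_cache_fits(kmem_cache_t *cp, unsigned int size)
{
	return kmem_cache_size(cp) >=
	       SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info);
}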