Commit b30973f8 authored by Christoph Hellwig, committed by Linus Torvalds

[PATCH] node-aware skb allocation

Node-aware allocation of skbs for the receive path.

Details:

  - __alloc_skb gets a new node argument and calls the node-aware
    slab functions with it.
  - netdev_alloc_skb passes the node number it gets from dev_to_node
    to it; everyone else passes -1 (any node).  A driver-side sketch
    follows below.
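
For illustration only (not part of this patch): a minimal sketch of a
driver receive-path helper that benefits from this change. The helper
name example_rx_refill is hypothetical; netdev_alloc_skb() is the
existing API, which after this patch allocates on the device's node.

/*
 * Hypothetical driver-side sketch: netdev_alloc_skb() now resolves the
 * device's NUMA node via dev_to_node() and allocates the skb head and
 * data there, so receive buffers end up local to the NIC.
 */
static struct sk_buff *example_rx_refill(struct net_device *dev,
					 unsigned int buf_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, buf_len);

	if (!skb)
		return NULL;	/* out of memory; refill retried later */

	/* skb->dev is set and NET_SKB_PAD headroom already reserved. */
	return skb;
}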
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 87348136
include/linux/skbuff.h
@@ -332,17 +332,17 @@ struct sk_buff {
 extern void	       kfree_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-				   gfp_t priority, int fclone);
+				   gfp_t priority, int fclone, int node);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
-	return __alloc_skb(size, priority, 0);
+	return __alloc_skb(size, priority, 0, -1);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
-	return __alloc_skb(size, priority, 1);
+	return __alloc_skb(size, priority, 1, -1);
 }
 
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
...
net/core/skbuff.c
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	@gfp_mask: allocation mask
  *	@fclone: allocate from fclone cache instead of head cache
  *		and allocate a cloned (child) skb
+ *	@node: numa node to allocate memory on
  *
  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
  *	tail room of size bytes. The object has a reference count of one.
@@ -141,7 +142,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *	%GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-			    int fclone)
+			    int fclone, int node)
 {
 	kmem_cache_t *cache;
 	struct skb_shared_info *shinfo;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 	if (!skb)
 		goto out;
 
 	/* Get the DATA. Size must match skb_add_mtu(). */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask);
+	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+			gfp_mask, node);
 	if (!data)
 		goto nodata;
@@ -267,9 +268,10 @@ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		unsigned int length, gfp_t gfp_mask)
 {
+	int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
 	struct sk_buff *skb;
 
-	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
...
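
A hedged illustration of the new calling convention (not from the
patch; skb, size, and node are placeholder variables): the node
argument follows the slab allocators, where -1 means "allocate on any
node", so callers without locality information keep the old behaviour.

	/* No locality hint: identical to the pre-patch __alloc_skb(). */
	skb = __alloc_skb(size, GFP_ATOMIC, 0, -1);

	/* Node-local allocation, e.g. with a node from dev_to_node(). */
	skb = __alloc_skb(size, GFP_ATOMIC, 0, node);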