Commit 1bc32826 authored by Robert Love, committed by Linus Torvalds

[PATCH] preempt-safe net/ code

This fixes three locations in net/ where per-CPU data could bite us
under preemption.  This is the result of an audit I did and should
cover all of the unsafe code in net/.
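
For illustration only (this fragment is hypothetical, not code from the
patch): the hazard is that a per-CPU pointer is computed while preemption
is still enabled, the task can then migrate to another CPU, and the later
access lands on the wrong CPU's data:

/* Purely illustrative sketch of the per-CPU hazard (hypothetical helper). */
static void buggy_example(struct sk_buff *skb)
{
        unsigned long flags;
        /* per-CPU pointer computed with preemption still enabled... */
        struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;

        /* ...preemption and migration here can move us to another CPU... */

        local_irq_save(flags);
        __skb_queue_head(list, skb);    /* ...so this touches the wrong CPU's pool */
        local_irq_restore(flags);
}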

In net/core/skbuff.c I did not have to introduce any new code - just
rearrange the grabbing of smp_processor_id() so it happens inside the
interrupt-off region.  Pretty clean fixes.

Note in the future we can use put_cpu() and get_cpu() to grab the CPU#
safely.  I will send a patch to Marcelo so we can have a 2.4 version
(which doesn't do the preempt stuff), too...
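
Roughly, that future form would look like the sketch below (illustrative
only; the helper name is made up; get_cpu() returns the current CPU with
preemption disabled and put_cpu() re-enables it):

/* Hypothetical future form of the pool access using get_cpu()/put_cpu(). */
static void to_pool_sketch(struct sk_buff *skb)
{
        struct sk_buff_head *list;
        int cpu = get_cpu();            /* disables preemption, returns this CPU */

        list = &skb_head_pool[cpu].list;
        __skb_queue_head(list, skb);    /* safe: we cannot migrate while here */
                                        /* (irq protection omitted for brevity) */
        put_cpu();                      /* re-enables preemption */
}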
parent eab0fed3
net/core/dev.c
@@ -1047,9 +1047,15 @@ int dev_queue_xmit(struct sk_buff *skb)
 	int cpu = smp_processor_id();
 
 	if (dev->xmit_lock_owner != cpu) {
+		/*
+		 * The spin_lock effectivly does a preempt lock, but
+		 * we are about to drop that...
+		 */
+		preempt_disable();
 		spin_unlock(&dev->queue_lock);
 		spin_lock(&dev->xmit_lock);
 		dev->xmit_lock_owner = cpu;
+		preempt_enable();
 
 		if (!netif_queue_stopped(dev)) {
 			if (netdev_nit)
net/core/skbuff.c
@@ -111,33 +111,37 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 static __inline__ struct sk_buff *skb_head_from_pool(void)
 {
-	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
-
-	if (skb_queue_len(list)) {
-		struct sk_buff *skb;
-		unsigned long flags;
-
-		local_irq_save(flags);
-		skb = __skb_dequeue(list);
-		local_irq_restore(flags);
-		return skb;
-	}
-	return NULL;
+	struct sk_buff_head *list;
+	struct sk_buff *skb = NULL;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	list = &skb_head_pool[smp_processor_id()].list;
+	if (skb_queue_len(list))
+		skb = __skb_dequeue(list);
+	local_irq_restore(flags);
+	return skb;
 }
 
 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
 {
-	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
-
-	if (skb_queue_len(list) < sysctl_hot_list_len) {
-		unsigned long flags;
-
-		local_irq_save(flags);
+	struct sk_buff_head *list;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	list = &skb_head_pool[smp_processor_id()].list;
+	if (skb_queue_len(list) < sysctl_hot_list_len) {
 		__skb_queue_head(list, skb);
 		local_irq_restore(flags);
 		return;
 	}
+	local_irq_restore(flags);
 	kmem_cache_free(skbuff_head_cache, skb);
 }