Commit c6e70088 authored by Robert Love, committed by Linus Torvalds

[PATCH] per-cpu data preempt-safing

This fixes unsafe access to per-CPU data, via reordering of instructions or
use of "get_cpu()".

Before anyone balks at the brlock.h fix, note this was in the
alternative version of the code which is not used by default.
parent 7f644d00
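
The bug class being fixed: on a CONFIG_PREEMPT kernel, a task can be preempted,
and migrated to another CPU, between the moment it reads smp_processor_id()
and the moment it uses the result, so it may end up touching per-CPU data that
belongs to a CPU it is no longer running on. A minimal sketch of the broken
and fixed patterns (example_stat is a made-up per-CPU counter array, not
something in this patch):

#include <linux/smp.h>
#include <linux/threads.h>

static unsigned long example_stat[NR_CPUS];	/* hypothetical per-CPU counters */

static void broken_count(void)
{
	int cpu = smp_processor_id();	/* preemption still enabled here...      */
	example_stat[cpu]++;		/* ...so this may now run on a different */
					/* CPU and corrupt a foreign counter.    */
}

static void safe_count(void)
{
	int cpu = get_cpu();		/* preempt_disable() + smp_processor_id() */
	example_stat[cpu]++;		/* task cannot migrate until put_cpu()    */
	put_cpu();
}

Even the single increment in broken_count() is unsafe: the read-modify-write
can complete against the wrong CPU's slot, racing with that CPU's own updates.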
@@ -85,7 +85,8 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
 
-	read_lock(&__brlock_array[smp_processor_id()][idx]);
+	preempt_disable();
+	_raw_read_lock(&__brlock_array[smp_processor_id()][idx]);
 }
 
 static inline void br_read_unlock (enum brlock_indices idx)
@@ -109,6 +110,7 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
 
+	preempt_disable();
 	ctr = &__brlock_array[smp_processor_id()][idx];
 	lock = &__br_write_locks[idx].lock;
 again:
@@ -147,6 +149,7 @@ static inline void br_read_unlock (enum brlock_indices idx)
 
 	wmb();
 	(*ctr)--;
+	preempt_enable();
 }
 
 #endif /* __BRLOCK_USE_ATOMICS */
...
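Why the brlock read side needs this: in the first hunk (the
__BRLOCK_USE_ATOMICS variant), read_lock() is replaced by an explicit
preempt_disable() followed by _raw_read_lock(), guaranteeing the per-CPU index
into __brlock_array is computed only after preemption is off. In the lock-free
variant patched by the other two hunks, br_read_lock() increments a counter in
this CPU's row of __brlock_array and br_read_unlock() must decrement the very
same counter, so preemption has to stay disabled across the whole read-side
critical section. A simplified sketch of that variant (writer-spin logic
omitted and types approximated):

/* Simplified from the hunks above; the real lock path also spins while
 * a writer holds the lock (the "again:" label in the hunk).
 */
static inline void br_read_lock_sketch(enum brlock_indices idx)
{
	unsigned int *ctr;

	preempt_disable();		/* pin the task to this CPU        */
	ctr = &__brlock_array[smp_processor_id()][idx];
	(*ctr)++;			/* announce this CPU as a reader   */
	mb();
}

static inline void br_read_unlock_sketch(enum brlock_indices idx)
{
	unsigned int *ctr = &__brlock_array[smp_processor_id()][idx];

	wmb();
	(*ctr)--;			/* must hit the same CPU's counter */
	preempt_enable();		/* migration becomes safe again    */
}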
@@ -514,9 +514,10 @@ static inline void __netif_schedule(struct net_device *dev)
 {
 	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;
 
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		dev->next_sched = softnet_data[cpu].output_queue;
 		softnet_data[cpu].output_queue = dev;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -563,10 +564,11 @@ static inline int netif_running(struct net_device *dev)
 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 {
 	if (atomic_dec_and_test(&skb->users)) {
-		int cpu =smp_processor_id();
+		int cpu;
 		unsigned long flags;
 
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		skb->next = softnet_data[cpu].completion_queue;
 		softnet_data[cpu].completion_queue = skb;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -726,9 +728,10 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
 static inline void __netif_rx_schedule(struct net_device *dev)
 {
 	unsigned long flags;
-	int cpu = smp_processor_id();
+	int cpu;
 
 	local_irq_save(flags);
+	cpu = smp_processor_id();
 	dev_hold(dev);
 	list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 	if (dev->quota < 0)
@@ -754,11 +757,12 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
 	if (netif_rx_schedule_prep(dev)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;
 
 		dev->quota += undo;
 
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 		__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
 		local_irq_restore(flags);
...
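The netdevice.h changes all follow one pattern: instead of adding a
preempt_disable()/preempt_enable() pair, they move the smp_processor_id() read
inside the local_irq_save() region that is already there. With local
interrupts off, the task cannot be preempted or migrated, so the CPU number
stays valid for every softnet_data[cpu] access until local_irq_restore(). A
sketch of the pattern:

static void softnet_pattern_sketch(void)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);		/* IRQs off: no preemption, no migration */
	cpu = smp_processor_id();	/* now stable for the critical section   */
	/* ... softnet_data[cpu] manipulation goes here ... */
	local_irq_restore(flags);
}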
@@ -86,9 +86,9 @@ extern void get_page_state(struct page_state *ret);
 
 #define mod_page_state(member, delta) \
 	do { \
-		preempt_disable(); \
-		page_states[smp_processor_id()].member += (delta); \
-		preempt_enable(); \
+		int cpu = get_cpu(); \
+		page_states[cpu].member += (delta); \
+		put_cpu(); \
 	} while (0)
 
 #define inc_page_state(member)	mod_page_state(member, 1UL)
...
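The page-state hunk trades the explicit preempt_disable()/preempt_enable()
pair for get_cpu()/put_cpu(), which is equivalent but also returns the CPU
number, saving a separate smp_processor_id() call inside the critical section.
The 2.5-era pair is roughly the following (check include/linux/smp.h in a
given tree for the exact definitions):

#define get_cpu()	({ preempt_disable(); smp_processor_id(); })
#define put_cpu()	preempt_enable()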
@@ -472,6 +472,7 @@ void check_highmem_ptes(void)
 {
 	int idx, type;
 
+	preempt_disable();
 	for (type = 0; type < KM_TYPE_NR; type++) {
 		idx = type + KM_TYPE_NR*smp_processor_id();
 		if (!pte_none(*(kmap_pte-idx))) {
@@ -479,6 +480,7 @@ void check_highmem_ptes(void)
 			BUG();
 		}
 	}
+	preempt_enable();
 }
 
 #endif
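
Finally, check_highmem_ptes() walks this CPU's block of atomic-kmap slots, and
the slot index folds the CPU number into the pointer arithmetic, so a
migration mid-loop would check a mix of two CPUs' slots. Hence preemption is
disabled around the whole walk. To illustrate with hypothetical numbers:

/* idx = type + KM_TYPE_NR * smp_processor_id();
 *
 * If KM_TYPE_NR were 16, CPU 0 would own slots 0..15 and CPU 1 slots
 * 16..31. Without preempt_disable(), smp_processor_id() could return 0
 * on one loop iteration and 1 on the next, mixing the two ranges.
 */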