Commit bd7c5d21 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] minor slab cleanups for hotplug CPUs

- Move the free_block prototype, and the ac_entry and ac_data functions, up in the
  file (a userspace sketch of the ac_entry layout follows the diff).

- Use list_for_each_entry in cpuup_callback (the pattern is sketched below, before the diff).

- Remove the unnecessary g_cpucache_up check in cpuup_callback.

- Make reap_timer_fnc take the CPU number as data, and check whether that
  CPU is offline before rescheduling (a no-op without CONFIG_HOTPLUG_CPU).
parent 9f989ac8
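Before the diff, here is a minimal, self-contained userspace sketch of the list_for_each() to list_for_each_entry() change made in cpuup_callback. The list helpers below are simplified stand-ins for the kernel's <linux/list.h> macros, and struct cache is a made-up example type standing in for kmem_cache_t; only the iteration pattern is the point.

/*
 * Sketch only: simplified list primitives, not <linux/list.h>.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

/* list_entry(): recover the enclosing structure from its embedded member. */
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Old style: caller declares a struct list_head * and calls list_entry(). */
#define list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/* New style: the iteration variable is already the containing type. */
#define list_for_each_entry(pos, head, member)                          \
        for (pos = list_entry((head)->next, typeof(*pos), member);      \
             &pos->member != (head);                                    \
             pos = list_entry(pos->member.next, typeof(*pos), member))

struct cache {
        const char *name;
        struct list_head next;      /* plays the role of kmem_cache_t.next */
};

static struct list_head cache_chain = LIST_HEAD_INIT(cache_chain);

int main(void)
{
        struct cache a = { .name = "cache-a" }, b = { .name = "cache-b" };
        struct list_head *p;
        struct cache *cachep;

        list_add_tail(&a.next, &cache_chain);
        list_add_tail(&b.next, &cache_chain);

        /* Before the cleanup: extra iterator plus an explicit list_entry(). */
        list_for_each(p, &cache_chain) {
                struct cache *c = list_entry(p, struct cache, next);
                printf("old-style walk: %s\n", c->name);
        }

        /* After the cleanup: list_for_each_entry() hides the conversion. */
        list_for_each_entry(cachep, &cache_chain, next) {
                printf("new-style walk: %s\n", cachep->name);
        }
        return 0;
}

The new form drops both the raw struct list_head iterator and the explicit list_entry() call, which is exactly what the CPU_UP_PREPARE hunk below does.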
@@ -521,9 +521,19 @@ enum {
 static DEFINE_PER_CPU(struct timer_list, reap_timers);
 
 static void reap_timer_fnc(unsigned long data);
+static void free_block(kmem_cache_t* cachep, void** objpp, int len);
 static void enable_cpucache (kmem_cache_t *cachep);
 
+static inline void ** ac_entry(struct array_cache *ac)
+{
+        return (void**)(ac+1);
+}
+
+static inline struct array_cache *ac_data(kmem_cache_t *cachep)
+{
+        return cachep->array[smp_processor_id()];
+}
+
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate (unsigned long gfporder, size_t size,
                 int flags, size_t *left_over, unsigned int *num)
@@ -573,6 +583,7 @@ static void start_cpu_timer(int cpu)
         if (rt->function == NULL) {
                 init_timer(rt);
                 rt->expires = jiffies + HZ + 3*cpu;
+                rt->data = cpu;
                 rt->function = reap_timer_fnc;
                 add_timer_on(rt, cpu);
         }
@@ -589,16 +600,15 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                   void *hcpu)
 {
         long cpu = (long)hcpu;
-        struct list_head *p;
+        kmem_cache_t* cachep;
 
         switch (action) {
         case CPU_UP_PREPARE:
                 down(&cache_chain_sem);
-                list_for_each(p, &cache_chain) {
+                list_for_each_entry(cachep, &cache_chain, next) {
                         int memsize;
                         struct array_cache *nc;
-                        kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
 
                         memsize = sizeof(void*)*cachep->limit+sizeof(struct array_cache);
                         nc = kmalloc(memsize, GFP_KERNEL);
                         if (!nc)
@@ -618,7 +628,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                 up(&cache_chain_sem);
                 break;
         case CPU_ONLINE:
-                if (g_cpucache_up == FULL)
-                        start_cpu_timer(cpu);
+                start_cpu_timer(cpu);
                 break;
         case CPU_UP_CANCELED:
@@ -643,16 +652,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
 static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 
-static inline void ** ac_entry(struct array_cache *ac)
-{
-        return (void**)(ac+1);
-}
-
-static inline struct array_cache *ac_data(kmem_cache_t *cachep)
-{
-        return cachep->array[smp_processor_id()];
-}
-
 /* Initialisation.
  * Called after the gfp() functions have been enabled, and before smp_init().
  */
@@ -1368,7 +1367,6 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
         preempt_enable();
 }
 
-static void free_block (kmem_cache_t* cachep, void** objpp, int len);
 static void drain_array_locked(kmem_cache_t* cachep,
                                 struct array_cache *ac, int force);
 
@@ -2601,17 +2599,19 @@ static inline void cache_reap (void)
 }
 
 /*
- * This is a timer handler. There is on per CPU. It is called periodially
+ * This is a timer handler. There is one per CPU. It is called periodially
  * to shrink this CPU's caches. Otherwise there could be memory tied up
  * for long periods (or for ever) due to load changes.
  */
-static void reap_timer_fnc(unsigned long data)
+static void reap_timer_fnc(unsigned long cpu)
 {
-        int cpu = smp_processor_id();
         struct timer_list *rt = &__get_cpu_var(reap_timers);
 
-        cache_reap();
-        mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
+        /* CPU hotplug can drag us off cpu: don't run on wrong CPU */
+        if (!cpu_is_offline(cpu)) {
+                cache_reap();
+                mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
+        }
 }
 
 #ifdef CONFIG_PROC_FS
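The ac_entry() and ac_data() helpers moved up by this patch assume that an array_cache is allocated with its object-pointer array placed immediately after the header, which is also why the CPU_UP_PREPARE hunk sizes the allocation as sizeof(void*)*cachep->limit + sizeof(struct array_cache). Below is a minimal userspace sketch of that layout; struct array_cache is cut down to two fields and malloc() stands in for kmalloc(), so it illustrates the idiom, not the kernel's actual structure.

/*
 * Sketch only: a header followed by an inline array of object pointers.
 */
#include <stdio.h>
#include <stdlib.h>

struct array_cache {
        unsigned int avail;     /* objects currently cached */
        unsigned int limit;     /* capacity of the trailing pointer array */
};

/* The pointer array starts right after the header: (ac + 1) skips past it. */
static void **ac_entry(struct array_cache *ac)
{
        return (void **)(ac + 1);
}

static struct array_cache *alloc_array_cache(unsigned int limit)
{
        size_t memsize = sizeof(void *) * limit + sizeof(struct array_cache);
        struct array_cache *ac = malloc(memsize);

        if (ac) {
                ac->avail = 0;
                ac->limit = limit;
        }
        return ac;
}

int main(void)
{
        struct array_cache *ac = alloc_array_cache(4);
        int obj = 42;

        if (!ac)
                return 1;

        /* Stash an object pointer in the inline array, as the slab code does. */
        ac_entry(ac)[ac->avail++] = &obj;

        printf("header %zu bytes, entries start at offset %zu, first entry %p\n",
               sizeof(*ac), (size_t)((char *)ac_entry(ac) - (char *)ac),
               ac_entry(ac)[0]);
        free(ac);
        return 0;
}

Because (ac + 1) points just past the header, ac_entry() needs no extra field to locate the array, and a single allocation covers both the bookkeeping and the cached object pointers.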