Commit bd7c5d21 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] minor slab cleanups for hotplug CPUs

- Move the free_block prototype, and the ac_entry and ac_data functions, up in the file.

- Use list_for_each_entry in cpuup_callback (a userspace sketch of the idiom follows the change list).

- Remove the unnecessary g_cpucache_up check in cpuup_callback.

- Make reap_timer_fnc take the CPU number as its timer data, and check whether that
  CPU is offline before rescheduling (a no-op without CONFIG_HOTPLUG_CPU); a condensed
  sketch follows the diff.
parent 9f989ac8
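
As a reading aid for the list_for_each_entry change, here is a minimal userspace sketch of the two iteration idioms, with just enough of <linux/list.h> re-implemented to compile on its own (GCC, since it uses the typeof extension). struct cache, cache_chain, and the other names below are illustrative stand-ins, not the real slab.c types:

    /*
     * Minimal userspace sketch of the list_for_each -> list_for_each_entry
     * change. The list machinery mirrors <linux/list.h>; "struct cache"
     * is a stand-in for kmem_cache_t.
     */
    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
            new->prev = head->prev;
            new->next = head;
            head->prev->next = new;
            head->prev = new;
    }

    /* Recover the enclosing struct from a pointer to its embedded member. */
    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    #define list_for_each(pos, head) \
            for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

    #define list_for_each_entry(pos, head, member) \
            for ((pos) = list_entry((head)->next, typeof(*(pos)), member); \
                 &(pos)->member != (head); \
                 (pos) = list_entry((pos)->member.next, typeof(*(pos)), member))

    struct cache { const char *name; struct list_head next; };

    int main(void)
    {
            struct list_head cache_chain = LIST_HEAD_INIT(cache_chain);
            struct cache a = { .name = "cache-a" }, b = { .name = "cache-b" };
            struct list_head *p;
            struct cache *cachep;

            list_add_tail(&a.next, &cache_chain);
            list_add_tail(&b.next, &cache_chain);

            /* Old idiom: iterate raw nodes, convert each one by hand. */
            list_for_each(p, &cache_chain) {
                    struct cache *c = list_entry(p, struct cache, next);
                    printf("old: %s\n", c->name);
            }

            /* New idiom: the conversion is folded into the loop macro. */
            list_for_each_entry(cachep, &cache_chain, next)
                    printf("new: %s\n", cachep->name);

            return 0;
    }

The _entry variant folds the list_entry() container lookup into the loop macro itself, which is why the patch can drop both the struct list_head *p declaration and the explicit list_entry() line inside the loop body.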
@@ -521,9 +521,19 @@ enum {
 static DEFINE_PER_CPU(struct timer_list, reap_timers);
 static void reap_timer_fnc(unsigned long data);
 static void free_block(kmem_cache_t* cachep, void** objpp, int len);
 static void enable_cpucache (kmem_cache_t *cachep);
 
+static inline void ** ac_entry(struct array_cache *ac)
+{
+        return (void**)(ac+1);
+}
+
+static inline struct array_cache *ac_data(kmem_cache_t *cachep)
+{
+        return cachep->array[smp_processor_id()];
+}
+
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate (unsigned long gfporder, size_t size,
                 int flags, size_t *left_over, unsigned int *num)
@@ -573,6 +583,7 @@ static void start_cpu_timer(int cpu)
         if (rt->function == NULL) {
                 init_timer(rt);
                 rt->expires = jiffies + HZ + 3*cpu;
+                rt->data = cpu;
                 rt->function = reap_timer_fnc;
                 add_timer_on(rt, cpu);
         }
@@ -589,16 +600,15 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                   void *hcpu)
 {
         long cpu = (long)hcpu;
-        struct list_head *p;
+        kmem_cache_t* cachep;
 
         switch (action) {
         case CPU_UP_PREPARE:
                 down(&cache_chain_sem);
-                list_for_each(p, &cache_chain) {
+                list_for_each_entry(cachep, &cache_chain, next) {
                         int memsize;
                         struct array_cache *nc;
-                        kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
 
                         memsize = sizeof(void*)*cachep->limit+sizeof(struct array_cache);
                         nc = kmalloc(memsize, GFP_KERNEL);
                         if (!nc)
@@ -618,8 +628,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                 up(&cache_chain_sem);
                 break;
         case CPU_ONLINE:
-                if (g_cpucache_up == FULL)
-                        start_cpu_timer(cpu);
+                start_cpu_timer(cpu);
                 break;
         case CPU_UP_CANCELED:
                 down(&cache_chain_sem);
@@ -643,16 +652,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 
-static inline void ** ac_entry(struct array_cache *ac)
-{
-        return (void**)(ac+1);
-}
-
-static inline struct array_cache *ac_data(kmem_cache_t *cachep)
-{
-        return cachep->array[smp_processor_id()];
-}
-
 /* Initialisation.
  * Called after the gfp() functions have been enabled, and before smp_init().
  */
@@ -1368,7 +1367,6 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
         preempt_enable();
 }
 
-static void free_block (kmem_cache_t* cachep, void** objpp, int len);
 static void drain_array_locked(kmem_cache_t* cachep,
                         struct array_cache *ac, int force);
@@ -2601,17 +2599,19 @@ static inline void cache_reap (void)
 }
 
 /*
- * This is a timer handler. There is on per CPU. It is called periodially
+ * This is a timer handler. There is one per CPU. It is called periodically
  * to shrink this CPU's caches. Otherwise there could be memory tied up
  * for long periods (or for ever) due to load changes.
  */
-static void reap_timer_fnc(unsigned long data)
+static void reap_timer_fnc(unsigned long cpu)
 {
-        int cpu = smp_processor_id();
         struct timer_list *rt = &__get_cpu_var(reap_timers);
 
-        cache_reap();
-        mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
+        /* CPU hotplug can drag us off cpu: don't run on wrong CPU */
+        if (!cpu_is_offline(cpu)) {
+                cache_reap();
+                mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
+        }
 }
 
 #ifdef CONFIG_PROC_FS
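
To see the timer rework in one place rather than spread across three hunks, here is a condensed sketch assuming the 2.6-era APIs visible in the diff (init_timer, add_timer_on, mod_timer, cpu_is_offline); cache_reap() is elided, the REAPTIMEOUT_CPUC value is a stand-in, and the per_cpu() lookup in start_cpu_timer is an assumption about code outside the hunk context:

    /*
     * Condensed sketch of the reworked per-CPU reap timer. Not the full
     * slab.c: cache_reap() is elided and REAPTIMEOUT_CPUC is a stand-in.
     */
    #include <linux/timer.h>
    #include <linux/percpu.h>
    #include <linux/cpu.h>

    #define REAPTIMEOUT_CPUC        (2*HZ)  /* stand-in reap interval */

    static DEFINE_PER_CPU(struct timer_list, reap_timers);

    static void reap_timer_fnc(unsigned long cpu)
    {
            struct timer_list *rt = &__get_cpu_var(reap_timers);

            /*
             * The handler now receives the CPU number via ->data instead
             * of calling smp_processor_id(): CPU hotplug can migrate a
             * pending timer to another CPU, so the two may disagree.
             */
            if (!cpu_is_offline(cpu)) {
                    /* cache_reap() would run here in the real slab.c */
                    mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
            }
    }

    static void start_cpu_timer(int cpu)
    {
            /* assumption: the real function fetches the timer via per_cpu() */
            struct timer_list *rt = &per_cpu(reap_timers, cpu);

            if (rt->function == NULL) {
                    init_timer(rt);
                    /* Stagger expiry per CPU and record the owning CPU. */
                    rt->expires = jiffies + HZ + 3*cpu;
                    rt->data = cpu;
                    rt->function = reap_timer_fnc;
                    add_timer_on(rt, cpu);
            }
    }

Passing the CPU number through rt->data matters because CPU hotplug can migrate a pending timer to another CPU; smp_processor_id() inside the handler would then report the wrong CPU, and an unplugged CPU's timer would keep rescheduling itself forever. The cpu_is_offline() check lets such a timer quietly die instead, and without CONFIG_HOTPLUG_CPU the check is a no-op, as the commit message notes.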