Commit c12e16e2 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] percpu: convert RCU

Patch from Dipankar Sarma <dipankar@in.ibm.com>

This patch converts the RCU per-CPU data to use the per_cpu data area,
and makes it safe for cpu_possible allocation by using CPU
notifiers.
parent 0c83f291
......@@ -39,6 +39,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
/**
* struct rcu_head - callback structure for use with RCU
......@@ -94,16 +95,16 @@ struct rcu_data {
long batch; /* Batch # for current RCU batch */
struct list_head nxtlist;
struct list_head curlist;
} ____cacheline_aligned_in_smp;
};
extern struct rcu_data rcu_data[NR_CPUS];
DECLARE_PER_CPU(struct rcu_data, rcu_data);
extern struct rcu_ctrlblk rcu_ctrlblk;
#define RCU_qsctr(cpu) (rcu_data[(cpu)].qsctr)
#define RCU_last_qsctr(cpu) (rcu_data[(cpu)].last_qsctr)
#define RCU_batch(cpu) (rcu_data[(cpu)].batch)
#define RCU_nxtlist(cpu) (rcu_data[(cpu)].nxtlist)
#define RCU_curlist(cpu) (rcu_data[(cpu)].curlist)
#define RCU_qsctr(cpu) (per_cpu(rcu_data, (cpu)).qsctr)
#define RCU_last_qsctr(cpu) (per_cpu(rcu_data, (cpu)).last_qsctr)
#define RCU_batch(cpu) (per_cpu(rcu_data, (cpu)).batch)
#define RCU_nxtlist(cpu) (per_cpu(rcu_data, (cpu)).nxtlist)
#define RCU_curlist(cpu) (per_cpu(rcu_data, (cpu)).curlist)
#define RCU_QSCTR_INVALID 0
......
......@@ -41,13 +41,14 @@
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
{ .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1,
.maxbatch = 1, .rcu_cpu_mask = 0 };
struct rcu_data rcu_data[NR_CPUS] __cacheline_aligned;
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
......@@ -198,6 +199,33 @@ void rcu_check_callbacks(int cpu, int user)
tasklet_schedule(&RCU_tasklet(cpu));
}
/* Prepare one CPU's RCU state: zero it and set up its tasklet and lists. */
static void __devinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	/* Start from a clean slate before initializing the members. */
	memset(rdp, 0, sizeof(*rdp));
	tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
	INIT_LIST_HEAD(&rdp->nxtlist);
	INIT_LIST_HEAD(&rdp->curlist);
}
/*
 * CPU hotplug callback: bring a CPU's RCU state up on CPU_UP_PREPARE.
 * All other actions are ignored for now.
 * (Space reserved for CPU_OFFLINE :))
 */
static int __devinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	if (action == CPU_UP_PREPARE)
		rcu_online_cpu((long)hcpu);
	return NOTIFY_OK;
}
/* Notifier block hooking rcu_cpu_notify into the CPU hotplug chain. */
static struct notifier_block __devinitdata rcu_nb = {
.notifier_call = rcu_cpu_notify,
};
/*
* Initializes rcu mechanism. Assumed to be called early.
* That is before local timer(SMP) or jiffie timer (uniproc) is setup.
......@@ -206,16 +234,13 @@ void rcu_check_callbacks(int cpu, int user)
*/
/*
 * Initialize the boot CPU's RCU state directly — the hotplug notifier
 * chain has not run for the boot CPU — then register the notifier so
 * secondary CPUs are set up via CPU_UP_PREPARE as they come online.
 *
 * NOTE(review): this span rendered both the removed pre-patch body
 * (NR_CPUS loop) and the added post-patch body; consolidated here to
 * the post-patch version only.
 */
void __init rcu_init(void)
{
	/* Bring up the boot CPU's state by invoking the callback by hand. */
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}
/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(void *completion)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment