Commit 709fdce7 authored by Paul E. McKenney

rcu: Express Tiny RCU updates in terms of RCU rather than RCU-sched

This commit renames Tiny RCU functions so that the lowest level of
functionality is RCU (e.g., synchronize_rcu()) rather than RCU-sched
(e.g., synchronize_sched()).  This provides greater naming compatibility
with Tree RCU, which will in turn permit more LoC removal once
the RCU-sched and RCU-bh update-side API is removed.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Fix Tiny call_rcu()'s EXPORT_SYMBOL() in response to a bug
  report from kbuild test robot. ]
parent 45975c7d
...@@ -49,15 +49,14 @@ ...@@ -49,15 +49,14 @@
/* Exported common interfaces */ /* Exported common interfaces */
#ifdef CONFIG_TINY_RCU #ifndef CONFIG_TINY_RCU
#define call_rcu call_rcu_sched void synchronize_sched(void);
#else void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
#endif #endif
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void call_rcu(struct rcu_head *head, rcu_callback_t func);
void synchronize_sched(void);
void rcu_barrier_tasks(void); void rcu_barrier_tasks(void);
void synchronize_rcu(void);
static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{ {
...@@ -68,7 +67,6 @@ static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) ...@@ -68,7 +67,6 @@ static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
void __rcu_read_lock(void); void __rcu_read_lock(void);
void __rcu_read_unlock(void); void __rcu_read_unlock(void);
void synchronize_rcu(void);
/* /*
* Defined as a macro as it is a very low level header included from * Defined as a macro as it is a very low level header included from
......
...@@ -36,9 +36,9 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp) ...@@ -36,9 +36,9 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
/* Never flag non-existent other CPUs! */ /* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; } static inline bool rcu_eqs_special_set(int cpu) { return false; }
static inline void synchronize_rcu(void) static inline void synchronize_sched(void)
{ {
synchronize_sched(); synchronize_rcu();
} }
static inline unsigned long get_state_synchronize_rcu(void) static inline unsigned long get_state_synchronize_rcu(void)
...@@ -61,16 +61,11 @@ static inline void cond_synchronize_sched(unsigned long oldstate) ...@@ -61,16 +61,11 @@ static inline void cond_synchronize_sched(unsigned long oldstate)
might_sleep(); might_sleep();
} }
static inline void synchronize_rcu_expedited(void) extern void rcu_barrier(void);
{
synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
}
extern void rcu_barrier_sched(void); static inline void rcu_barrier_sched(void)
static inline void rcu_barrier(void)
{ {
rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ rcu_barrier(); /* Only one CPU, so only one list of callbacks! */
} }
static inline void rcu_barrier_bh(void) static inline void rcu_barrier_bh(void)
...@@ -88,27 +83,36 @@ static inline void synchronize_rcu_bh_expedited(void) ...@@ -88,27 +83,36 @@ static inline void synchronize_rcu_bh_expedited(void)
synchronize_sched(); synchronize_sched();
} }
static inline void synchronize_rcu_expedited(void)
{
synchronize_sched();
}
static inline void synchronize_sched_expedited(void) static inline void synchronize_sched_expedited(void)
{ {
synchronize_sched(); synchronize_sched();
} }
static inline void kfree_call_rcu(struct rcu_head *head, static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
rcu_callback_t func) {
call_rcu(head, func);
}
static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{ {
call_rcu(head, func); call_rcu(head, func);
} }
void rcu_sched_qs(void); void rcu_qs(void);
static inline void rcu_softirq_qs(void) static inline void rcu_softirq_qs(void)
{ {
rcu_sched_qs(); rcu_qs();
} }
#define rcu_note_context_switch(preempt) \ #define rcu_note_context_switch(preempt) \
do { \ do { \
rcu_sched_qs(); \ rcu_qs(); \
rcu_tasks_qs(current); \ rcu_tasks_qs(current); \
} while (0) } while (0)
......
...@@ -45,7 +45,6 @@ static inline void rcu_virt_note_context_switch(int cpu) ...@@ -45,7 +45,6 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(false); rcu_note_context_switch(false);
} }
void synchronize_rcu(void);
static inline void synchronize_rcu_bh(void) static inline void synchronize_rcu_bh(void)
{ {
synchronize_rcu(); synchronize_rcu();
......
...@@ -46,25 +46,25 @@ struct rcu_ctrlblk { ...@@ -46,25 +46,25 @@ struct rcu_ctrlblk {
}; };
/* Definition for rcupdate control block. */ /* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = { static struct rcu_ctrlblk rcu_ctrlblk = {
.donetail = &rcu_sched_ctrlblk.rcucblist, .donetail = &rcu_ctrlblk.rcucblist,
.curtail = &rcu_sched_ctrlblk.rcucblist, .curtail = &rcu_ctrlblk.rcucblist,
}; };
void rcu_barrier_sched(void) void rcu_barrier(void)
{ {
wait_rcu_gp(call_rcu_sched); wait_rcu_gp(call_rcu);
} }
EXPORT_SYMBOL(rcu_barrier_sched); EXPORT_SYMBOL(rcu_barrier);
/* Record an rcu quiescent state. */ /* Record an rcu quiescent state. */
void rcu_sched_qs(void) void rcu_qs(void)
{ {
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
if (rcu_sched_ctrlblk.donetail != rcu_sched_ctrlblk.curtail) { if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
rcu_sched_ctrlblk.donetail = rcu_sched_ctrlblk.curtail; rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
raise_softirq(RCU_SOFTIRQ); raise_softirq(RCU_SOFTIRQ);
} }
local_irq_restore(flags); local_irq_restore(flags);
...@@ -79,7 +79,7 @@ void rcu_sched_qs(void) ...@@ -79,7 +79,7 @@ void rcu_sched_qs(void)
void rcu_check_callbacks(int user) void rcu_check_callbacks(int user)
{ {
if (user) if (user)
rcu_sched_qs(); rcu_qs();
} }
/* Invoke the RCU callbacks whose grace period has elapsed. */ /* Invoke the RCU callbacks whose grace period has elapsed. */
...@@ -90,17 +90,17 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused ...@@ -90,17 +90,17 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
/* Move the ready-to-invoke callbacks to a local list. */ /* Move the ready-to-invoke callbacks to a local list. */
local_irq_save(flags); local_irq_save(flags);
if (rcu_sched_ctrlblk.donetail == &rcu_sched_ctrlblk.rcucblist) { if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
/* No callbacks ready, so just leave. */ /* No callbacks ready, so just leave. */
local_irq_restore(flags); local_irq_restore(flags);
return; return;
} }
list = rcu_sched_ctrlblk.rcucblist; list = rcu_ctrlblk.rcucblist;
rcu_sched_ctrlblk.rcucblist = *rcu_sched_ctrlblk.donetail; rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
*rcu_sched_ctrlblk.donetail = NULL; *rcu_ctrlblk.donetail = NULL;
if (rcu_sched_ctrlblk.curtail == rcu_sched_ctrlblk.donetail) if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
rcu_sched_ctrlblk.curtail = &rcu_sched_ctrlblk.rcucblist; rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
rcu_sched_ctrlblk.donetail = &rcu_sched_ctrlblk.rcucblist; rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
local_irq_restore(flags); local_irq_restore(flags);
/* Invoke the callbacks on the local list. */ /* Invoke the callbacks on the local list. */
...@@ -125,21 +125,21 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused ...@@ -125,21 +125,21 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
* *
* Cool, huh? (Due to Josh Triplett.) * Cool, huh? (Due to Josh Triplett.)
*/ */
void synchronize_sched(void) void synchronize_rcu(void)
{ {
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map), lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_sched() in RCU read-side critical section"); "Illegal synchronize_sched() in RCU read-side critical section");
} }
EXPORT_SYMBOL_GPL(synchronize_sched); EXPORT_SYMBOL_GPL(synchronize_rcu);
/* /*
* Post an RCU callback to be invoked after the end of an RCU-sched grace * Post an RCU callback to be invoked after the end of an RCU-sched grace
* period. But since we have but one CPU, that would be after any * period. But since we have but one CPU, that would be after any
* quiescent state. * quiescent state.
*/ */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) void call_rcu(struct rcu_head *head, rcu_callback_t func)
{ {
unsigned long flags; unsigned long flags;
...@@ -148,16 +148,16 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) ...@@ -148,16 +148,16 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
head->next = NULL; head->next = NULL;
local_irq_save(flags); local_irq_save(flags);
*rcu_sched_ctrlblk.curtail = head; *rcu_ctrlblk.curtail = head;
rcu_sched_ctrlblk.curtail = &head->next; rcu_ctrlblk.curtail = &head->next;
local_irq_restore(flags); local_irq_restore(flags);
if (unlikely(is_idle_task(current))) { if (unlikely(is_idle_task(current))) {
/* force scheduling for rcu_sched_qs() */ /* force scheduling for rcu_qs() */
resched_cpu(0); resched_cpu(0);
} }
} }
EXPORT_SYMBOL_GPL(call_rcu_sched); EXPORT_SYMBOL_GPL(call_rcu);
void __init rcu_init(void) void __init rcu_init(void)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment