Commit 3f49bdd9 authored by Tejun Heo

percpu_ref: restructure operation mode switching

Restructure atomic/percpu mode switching.

* The users of __percpu_ref_switch_to_atomic/percpu() now call a new
  function __percpu_ref_switch_mode() which calls either of the
  original switching functions depending on the current state of
  ref->force_atomic and the __PERCPU_REF_DEAD flag.  The callers no
  longer check whether switching is necessary but always invoke
  __percpu_ref_switch_mode().

* !ref->confirm_switch waiting is collected into
  __percpu_ref_switch_mode().

This patch doesn't cause any behavior differences.
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 18808354
...@@ -161,16 +161,6 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref) ...@@ -161,16 +161,6 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref, static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch) percpu_ref_func_t *confirm_switch)
{ {
/*
* If the previous ATOMIC switching hasn't finished yet, wait for
* its completion. If the caller ensures that ATOMIC switching
* isn't in progress, this function can be called from any context.
* Do an extra confirm_switch test to circumvent the unconditional
* might_sleep() in wait_event().
*/
if (ref->confirm_switch)
wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) { if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
if (confirm_switch) if (confirm_switch)
confirm_switch(ref); confirm_switch(ref);
...@@ -195,16 +185,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) ...@@ -195,16 +185,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
unsigned long __percpu *percpu_count = percpu_count_ptr(ref); unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
int cpu; int cpu;
/*
* If the previous ATOMIC switching hasn't finished yet, wait for
* its completion. If the caller ensures that ATOMIC switching
* isn't in progress, this function can be called from any context.
* Do an extra confirm_switch test to circumvent the unconditional
* might_sleep() in wait_event().
*/
if (ref->confirm_switch)
wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
BUG_ON(!percpu_count); BUG_ON(!percpu_count);
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
...@@ -225,6 +205,25 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) ...@@ -225,6 +205,25 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC); ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
} }
/*
 * Enter the operation mode appropriate for @ref's current state:
 * atomic when the ref is forced atomic or marked DEAD, percpu
 * otherwise.  @confirm_switch is forwarded on an atomic switch.
 */
static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	bool need_atomic;

	/*
	 * A still-pending ATOMIC switch must finish before the next mode
	 * is chosen.  The explicit ->confirm_switch test sidesteps the
	 * unconditional might_sleep() in wait_event(), so a caller that
	 * guarantees no switch is in flight may run in any context.
	 */
	if (ref->confirm_switch)
		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);

	need_atomic = ref->force_atomic ||
		      (ref->percpu_count_ptr & __PERCPU_REF_DEAD);
	if (need_atomic)
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}
/** /**
* percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
* @ref: percpu_ref to switch to atomic mode * @ref: percpu_ref to switch to atomic mode
...@@ -241,16 +240,15 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) ...@@ -241,16 +240,15 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
* operations. Note that @ref will stay in atomic mode across kill/reinit * operations. Note that @ref will stay in atomic mode across kill/reinit
* cycles until percpu_ref_switch_to_percpu() is called. * cycles until percpu_ref_switch_to_percpu() is called.
* *
* This function normally doesn't block and can be called from any context * This function may block if @ref is in the process of switching to atomic
* but it may block if @confirm_kill is specified and @ref is already in * mode. If the caller ensures that @ref is not in the process of
* the process of switching to atomic mode. In such cases, @confirm_switch * switching to atomic mode, this function can be called from any context.
* will be invoked after the switching is complete.
*/ */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	/*
	 * Record the caller's preference and let the common switcher
	 * enter the right mode; it also waits out any in-flight ATOMIC
	 * switch before acting.
	 */
	ref->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);
}
/** /**
...@@ -267,17 +265,14 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref, ...@@ -267,17 +265,14 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
* dying or dead, the actual switching takes place on the following * dying or dead, the actual switching takes place on the following
* percpu_ref_reinit(). * percpu_ref_reinit().
* *
* This function normally doesn't block and can be called from any context * This function may block if @ref is in the process of switching to atomic
* but it may block if @ref is in the process of switching to atomic mode * mode. If the caller ensures that @ref is not in the process of
* by percpu_ref_switch_atomic(). * switching to atomic mode, this function can be called from any context.
*/ */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref) void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{ {
ref->force_atomic = false; ref->force_atomic = false;
__percpu_ref_switch_mode(ref, NULL);
/* a dying or dead ref can't be switched to percpu mode w/o reinit */
if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
__percpu_ref_switch_to_percpu(ref);
} }
/** /**
...@@ -302,7 +297,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref, ...@@ -302,7 +297,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
"%s called more than once on %pf!", __func__, ref->release); "%s called more than once on %pf!", __func__, ref->release);
ref->percpu_count_ptr |= __PERCPU_REF_DEAD; ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
__percpu_ref_switch_to_atomic(ref, confirm_kill); __percpu_ref_switch_mode(ref, confirm_kill);
percpu_ref_put(ref); percpu_ref_put(ref);
} }
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
...@@ -324,7 +319,6 @@ void percpu_ref_reinit(struct percpu_ref *ref) ...@@ -324,7 +319,6 @@ void percpu_ref_reinit(struct percpu_ref *ref)
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD; ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref); percpu_ref_get(ref);
if (!ref->force_atomic) __percpu_ref_switch_mode(ref, NULL);
__percpu_ref_switch_to_percpu(ref);
} }
EXPORT_SYMBOL_GPL(percpu_ref_reinit); EXPORT_SYMBOL_GPL(percpu_ref_reinit);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment