Commit 210f7cdc authored by NeilBrown, committed by Shaohua Li

percpu-refcount: support synchronous switch to atomic mode.

percpu_ref_switch_to_atomic_sync() schedules the switch to atomic mode, then
waits for it to complete.

Also export percpu_ref_switch_to_* so they can be used from modules.

This will be used in md/raid to count the number of pending write
requests to an array.
We occasionally need to check if the count is zero, but most often
we don't care.
We always want updates to the counter to be fast, as in some cases
we count every 4K page.
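
To make the intended pattern concrete, here is a minimal sketch of such a
caller. It is illustrative only: the structure and helper names are
hypothetical, not the actual md/raid code.

    #include <linux/percpu-refcount.h>

    /* Sketch only: "array_conf" and these helpers are hypothetical. */
    struct array_conf {
            struct percpu_ref writes_pending;  /* one reference per in-flight write */
    };

    static void writes_pending_release(struct percpu_ref *ref)
    {
            /* No-op: the counter is only ever polled, never killed. */
    }

    static int array_conf_init(struct array_conf *conf)
    {
            int ret;

            ret = percpu_ref_init(&conf->writes_pending,
                                  writes_pending_release, 0, GFP_KERNEL);
            if (ret)
                    return ret;
            /* Drop the initial reference so the count reflects pending writes. */
            percpu_ref_put(&conf->writes_pending);
            return 0;
    }

    static void write_begin(struct array_conf *conf)
    {
            /* Fast path: a percpu increment, cheap enough to run per 4K page. */
            percpu_ref_get(&conf->writes_pending);
    }

    static void write_end(struct array_conf *conf)
    {
            percpu_ref_put(&conf->writes_pending);
    }

    static bool writes_quiesced(struct array_conf *conf)
    {
            /*
             * Occasional slow path: fold the percpu counters into the
             * atomic count and wait for the switch to complete, so that
             * percpu_ref_is_zero() gives a reliable answer.
             */
            percpu_ref_switch_to_atomic_sync(&conf->writes_pending);
            return percpu_ref_is_zero(&conf->writes_pending);
    }

Note that writes_quiesced() leaves the ref in atomic mode; a caller that
wants the cheap percpu fast path again must switch back explicitly (see the
note after the diff below).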
Signed-off-by: NeilBrown <neilb@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Shaohua Li <shli@fb.com>
parent 55cc39f3
@@ -99,6 +99,7 @@ int __must_check percpu_ref_init(struct percpu_ref *ref,
 void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
...
@@ -260,6 +260,22 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
+
+/**
+ * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ *
+ * Schedule switching the ref to atomic mode, and wait for the
+ * switch to complete.  Caller must ensure that no other thread
+ * will switch back to percpu mode.
+ */
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
+{
+	percpu_ref_switch_to_atomic(ref, NULL);
+	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);

 /**
  * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
...
@@ -290,6 +306,7 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

 /**
  * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
...
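
The "no other thread will switch back to percpu mode" requirement in the new
kerneldoc falls entirely to the caller. One way to serialize a zero check
against resuming percpu mode is sketched below; the mutex and helper name are
hypothetical, not part of this commit.

    #include <linux/mutex.h>
    #include <linux/percpu-refcount.h>

    static DEFINE_MUTEX(quiesce_lock);  /* hypothetical caller-side serialization */

    static bool check_quiesced_and_resume(struct percpu_ref *ref)
    {
            bool zero;

            mutex_lock(&quiesce_lock);
            /* Fold the percpu counters and wait until atomic mode is in effect. */
            percpu_ref_switch_to_atomic_sync(ref);
            zero = percpu_ref_is_zero(ref);
            /* Resume the cheap percpu fast path once the check is done. */
            percpu_ref_switch_to_percpu(ref);
            mutex_unlock(&quiesce_lock);
            return zero;
    }

Whether a plain mutex or a count of concurrent checkers is used, the point is
the same: the switch back to percpu mode must not race with another thread
that still relies on the ref being in atomic mode.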