Commit 18fd5baa authored by Peng Tao, committed by Greg Kroah-Hartman

staging/lustre/libcfs: remove schedule_timeout_and_set_state

Cc: Andreas Dilger <andreas.dilger@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Peng Tao <bergwolf@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b7efb98d
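
The helper being removed is only a thin wrapper around two standard scheduler primitives, so every caller below is open-coded in place. A minimal sketch of the equivalence, using HZ (one second) as an illustrative timeout and a made-up helper name, not code from this patch:

    #include <linux/sched.h>

    /* illustrative only -- mirrors the wrapper body deleted below */
    static void relax_for_a_second(void)
    {
            set_current_state(TASK_INTERRUPTIBLE);  /* was the 'state' argument */
            schedule_timeout(HZ);                   /* was the 'timeout' argument */
    }

schedule_timeout() returns with the task back in TASK_RUNNING, so the call sites need no extra state reset after the sleep.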
@@ -49,7 +49,6 @@ void cfs_pause(cfs_duration_t ticks);
  * Timer
  */
 typedef void (cfs_timer_func_t)(ulong_ptr_t);
-void schedule_timeout_and_set_state(long, int64_t);
 void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);

@@ -1209,8 +1209,8 @@ lnet_router_checker(void *arg)
                 /* Call cfs_pause() here always adds 1 to load average
                  * because kernel counts # active tasks as nr_running
                  * + nr_uninterruptible. */
-                schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
-                                               cfs_time_seconds(1));
+                set_current_state(TASK_INTERRUPTIBLE);
+                schedule_timeout(cfs_time_seconds(1));
         }

         LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);

@@ -192,8 +192,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
                 int to = cfs_time_seconds(1);
                 while (to > 0) {
-                        schedule_timeout_and_set_state(
-                                        TASK_INTERRUPTIBLE, to);
+                        set_current_state(TASK_INTERRUPTIBLE);
+                        schedule_timeout(to);
                         if (lock->l_granted_mode == lock->l_req_mode ||
                             lock->l_flags & LDLM_FL_DESTROYED)
                                 break;

@@ -127,8 +127,8 @@ int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
         if (ret) {
                 CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
                        id, ms);
-                schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
-                                               cfs_time_seconds(ms) / 1000);
+                set_current_state(TASK_UNINTERRUPTIBLE);
+                schedule_timeout(cfs_time_seconds(ms) / 1000);
                 set_current_state(TASK_RUNNING);
                 CERROR("cfs_fail_timeout id %x awake\n", id);
         }

@@ -70,14 +70,6 @@ add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive_head);

-void
-schedule_timeout_and_set_state(long state, int64_t timeout)
-{
-        set_current_state(state);
-        schedule_timeout(timeout);
-}
-EXPORT_SYMBOL(schedule_timeout_and_set_state);
-
 /* deschedule for a bit... */
 void
 cfs_pause(cfs_duration_t ticks)

@@ -1534,8 +1534,8 @@ void obd_exports_barrier(struct obd_device *obd)
         spin_lock(&obd->obd_dev_lock);
         while (!list_empty(&obd->obd_unlinked_exports)) {
                 spin_unlock(&obd->obd_dev_lock);
-                schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
-                                               cfs_time_seconds(waited));
+                set_current_state(TASK_UNINTERRUPTIBLE);
+                schedule_timeout(cfs_time_seconds(waited));
                 if (waited > 5 && IS_PO2(waited)) {
                         LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
                                       "more than %d seconds. "

@@ -606,7 +606,8 @@ static int echo_cleanup(struct obd_device *obd)
                 /* XXX Bug 3413; wait for a bit to ensure the BL callback has
                  * happened before calling ldlm_namespace_free() */
-                schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE, cfs_time_seconds(1));
+                set_current_state(TASK_UNINTERRUPTIBLE);
+                schedule_timeout(cfs_time_seconds(1));
                 ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
                 obd->obd_namespace = NULL;

@@ -997,8 +997,8 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
                         spin_unlock(&ec->ec_lock);
                         CERROR("echo_client still has objects at cleanup time, "
                                "wait for 1 second\n");
-                        schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
-                                                       cfs_time_seconds(1));
+                        set_current_state(TASK_UNINTERRUPTIBLE);
+                        schedule_timeout(cfs_time_seconds(1));
                         lu_site_purge(env, &ed->ed_site->cs_lu, -1);
                         spin_lock(&ec->ec_lock);
                 }

@@ -543,8 +543,8 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
                         "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
                         newctx, newctx->cc_flags);
-                schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
-                                               HZ);
+                set_current_state(TASK_INTERRUPTIBLE);
+                schedule_timeout(HZ);
         } else {
                 /*
                  * it's possible newctx == oldctx if we're switching