Commit d3caf4d5 authored by Peng Tao, committed by Greg Kroah-Hartman

staging/lustre/libcfs: remove cfs_pause

Cc: Andreas Dilger <andreas.dilger@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Peng Tao <bergwolf@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 18fd5baa
@@ -40,11 +40,6 @@
 #ifndef __LIBCFS_PRIM_H__
 #define __LIBCFS_PRIM_H__

-/*
- * Schedule
- */
-void cfs_pause(cfs_duration_t ticks);
-
 /*
  * Timer
  */

@@ -2774,7 +2774,8 @@ kiblnd_base_shutdown(void)
 			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
 			       "Waiting for %d threads to terminate\n",
 			       atomic_read(&kiblnd_data.kib_nthreads));
-			cfs_pause(cfs_time_seconds(1));
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(cfs_time_seconds(1));
 		}

 		/* fall through */
@@ -2835,7 +2836,8 @@ kiblnd_shutdown (lnet_ni_t *ni)
 			       "%s: waiting for %d peers to disconnect\n",
 			       libcfs_nid2str(ni->ni_nid),
 			       atomic_read(&net->ibn_npeers));
-			cfs_pause(cfs_time_seconds(1));
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(cfs_time_seconds(1));
 		}

 		kiblnd_net_fini_pools(net);

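These shutdown loops rate-limit their console noise with the ((i & (-i)) == i) test: the message is promoted to D_WARNING only when the iteration counter is a power of two, so a long wait warns at exponentially spaced intervals rather than on every one-second pass. A standalone sketch of the test, for reference (is_pow2() is just an illustrative name, not something the patch introduces):

/* i & -i isolates the lowest set bit, so the comparison holds exactly
 * when i has at most one bit set: 0, 1, 2, 4, 8, ... */
static int is_pow2(unsigned int i)
{
	return (i & (-i)) == i;
}
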
@@ -2336,7 +2336,8 @@ ksocknal_base_shutdown(void)
 			       "waiting for %d threads to terminate\n",
 			       ksocknal_data.ksnd_nthreads);
 			read_unlock(&ksocknal_data.ksnd_global_lock);
-			cfs_pause(cfs_time_seconds(1));
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(cfs_time_seconds(1));
 			read_lock(&ksocknal_data.ksnd_global_lock);
 		}
 		read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2584,7 +2585,8 @@ ksocknal_shutdown (lnet_ni_t *ni)
 		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
 		       "waiting for %d peers to disconnect\n",
 		       net->ksnn_npeers);
-		cfs_pause(cfs_time_seconds(1));
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(1));

 		ksocknal_debug_peerhash(ni);

@@ -189,7 +189,8 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
 	int bufnob;

 	if (ksocknal_data.ksnd_stall_tx != 0) {
-		cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
 	}

 	LASSERT (tx->tx_resid != 0);
@@ -345,7 +346,8 @@ ksocknal_receive (ksock_conn_t *conn)
 	int rc;

 	if (ksocknal_data.ksnd_stall_rx != 0) {
-		cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
 	}

 	rc = ksocknal_connsock_addref(conn);

@@ -371,7 +371,8 @@ lnet_acceptor(void *arg)
 		if (rc != 0) {
 			if (rc != -EAGAIN) {
 				CWARN("Accept error %d: pausing...\n", rc);
-				cfs_pause(cfs_time_seconds(1));
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				schedule_timeout(cfs_time_seconds(1));
 			}
 			continue;
 		}

@@ -994,7 +994,8 @@ lnet_shutdown_lndnis (void)
 				       "Waiting for zombie LNI %s\n",
 				       libcfs_nid2str(ni->ni_nid));
 			}
-			cfs_pause(cfs_time_seconds(1));
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(cfs_time_seconds(1));
 			lnet_net_lock(LNET_LOCK_EX);
 			continue;
 		}

@@ -145,7 +145,8 @@ lnet_peer_tables_cleanup(void)
 				       "Waiting for %d peers on peer table\n",
 				       ptable->pt_number);
 			}
-			cfs_pause(cfs_time_seconds(1) / 2);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(cfs_time_seconds(1) / 2);
 			lnet_net_lock(i);
 		}
 		list_splice_init(&ptable->pt_deathrow, &deathrow);

@@ -779,7 +779,8 @@ lnet_wait_known_routerstate(void)
 		if (all_known)
 			return;

-		cfs_pause(cfs_time_seconds(1));
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(1));
 	}
 }
@@ -1147,7 +1148,8 @@ lnet_prune_rc_data(int wait_unlink)
 		i++;
 		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 		       "Waiting for rc buffers to unlink\n");
-		cfs_pause(cfs_time_seconds(1) / 4);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(1) / 4);

 		lnet_net_lock(LNET_LOCK_EX);
 	}
@@ -1206,7 +1208,7 @@ lnet_router_checker(void *arg)
 		lnet_prune_rc_data(0); /* don't wait for UNLINK */

-		/* Call cfs_pause() here always adds 1 to load average
+		/* Call schedule_timeout() here always adds 1 to load average
 		 * because kernel counts # active tasks as nr_running
 		 * + nr_uninterruptible. */
 		set_current_state(TASK_INTERRUPTIBLE);

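The router checker above is the one call site that deliberately keeps an interruptible sleep, and the reworded comment explains why: a task blocked in TASK_UNINTERRUPTIBLE is counted in nr_uninterruptible and therefore adds one to the load average for as long as it sleeps, whereas TASK_INTERRUPTIBLE does not. A minimal sketch of the interruptible variant, for comparison (quiet_sleep() is an illustrative name only, not part of the patch):

/* Sleeps without contributing to loadavg; schedule_timeout() may also
 * return early if the task is woken up or a signal becomes pending. */
static void quiet_sleep(long ticks)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(ticks);
}
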
@@ -1346,7 +1346,8 @@ lstcon_rpc_cleanup_wait(void)
 		mutex_unlock(&console_session.ses_mutex);

 		CWARN("Session is shutting down, waiting for termination of transactions\n");
-		cfs_pause(cfs_time_seconds(1));
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(cfs_time_seconds(1));

 		mutex_lock(&console_session.ses_mutex);
 	}

@@ -1585,7 +1585,8 @@ srpc_startup (void)
 	spin_lock_init(&srpc_data.rpc_glock);

 	/* 1 second pause to avoid timestamp reuse */
-	cfs_pause(cfs_time_seconds(1));
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(cfs_time_seconds(1));
 	srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;

 	srpc_data.rpc_state = SRPC_STATE_NONE;

@@ -572,7 +572,11 @@ swi_state2str (int state)
 #undef STATE2STR
 }

-#define selftest_wait_events() cfs_pause(cfs_time_seconds(1) / 10)
+#define selftest_wait_events()				\
+	do {						\
+		set_current_state(TASK_UNINTERRUPTIBLE);	\
+		schedule_timeout(cfs_time_seconds(1) / 10);	\
+	} while (0)

 #define lst_wait_until(cond, lock, fmt, ...)		\

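The replacement for the single-expression selftest_wait_events() macro is wrapped in do { ... } while (0) so that the two statements still behave as one statement at every call site; without the wrapper, an unbraced if would guard only the first of them. A minimal illustration (BROKEN_WAIT() and wait_if_needed() are hypothetical, shown only to contrast the expansion):

/* Hypothetical counter-example: two bare statements in one macro. */
#define BROKEN_WAIT()					\
	set_current_state(TASK_UNINTERRUPTIBLE);	\
	schedule_timeout(cfs_time_seconds(1) / 10)

static void wait_if_needed(int need_more_events)
{
	if (need_more_events)
		BROKEN_WAIT();	/* only set_current_state() is guarded by the
				 * if; schedule_timeout() runs unconditionally */
}
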
@@ -96,7 +96,8 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
 				LCONSOLE_WARN("====== for current process =====\n");
 				dump_stack();
 				LCONSOLE_WARN("====== end =======\n");
-				cfs_pause(1000 * HZ);
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				schedule_timeout(1000 * HZ);
 			}
 			cpu_relax();
 		}

@@ -70,15 +70,6 @@ add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive_head);

-/* deschedule for a bit... */
-void
-cfs_pause(cfs_duration_t ticks)
-{
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	schedule_timeout(ticks);
-}
-EXPORT_SYMBOL(cfs_pause);
-
 void cfs_init_timer(struct timer_list *t)
 {
 	init_timer(t);

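The hunk above shows everything cfs_pause() ever did: set the task state to TASK_UNINTERRUPTIBLE and call schedule_timeout(), which is exactly what each converted call site now open-codes. For readers comparing against core kernel helpers, the same sleep can also be written with the stock schedule_timeout_uninterruptible() wrapper; a minimal sketch of the equivalence (pause_ticks() is a hypothetical name, not something this patch adds):

#include <linux/sched.h>	/* set_current_state(), schedule_timeout*() */

/* Hypothetical helper, equivalent to the removed cfs_pause(). */
static void pause_ticks(long ticks)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(ticks);
}

/* The same sleep expressed with the core kernel wrapper. */
static void pause_ticks_alt(long ticks)
{
	schedule_timeout_uninterruptible(ticks);
}
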
@@ -334,7 +334,8 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
 			       sched->ws_nthreads, sched->ws_name);
 			spin_unlock(&cfs_wi_data.wi_glock);

-			cfs_pause(cfs_time_seconds(1) / 20);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(cfs_time_seconds(1) / 20);
 			spin_lock(&cfs_wi_data.wi_glock);
 		}
@@ -459,7 +460,8 @@ cfs_wi_shutdown (void)
 		while (sched->ws_nthreads != 0) {
 			spin_unlock(&cfs_wi_data.wi_glock);
-			cfs_pause(cfs_time_seconds(1) / 20);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(cfs_time_seconds(1) / 20);
 			spin_lock(&cfs_wi_data.wi_glock);
 		}
 		spin_unlock(&cfs_wi_data.wi_glock);