Commit b1137468 authored by J. Bruce Fields, committed by Trond Myklebust

lockd: define host_for_each{_safe} macros

We've got a lot of loops like this, and I find them a little easier to
read with the macros.  More such loops are coming.
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
[ cel: Forward-ported to 2.6.37 ]
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent bf269551
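
For context: the new macros collapse the open-coded two-level walk of the nlm_hosts hash table into a single statement. A rough before-and-after sketch, using the names from fs/lockd/host.c:

	/* before: walk each hash chain, then each host on the chain */
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain)
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_inuse = 0;

	/* after: the same walk through the new macro */
	for_each_host(host, pos, chain, nlm_hosts)
		host->h_inuse = 0;

Since the macro still expands to nested for loops, a break in the caller's body would only leave the current hash chain; the call sites converted below rely on continue and goto, which behave the same either way.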
@@ -26,6 +26,18 @@
 #define NLM_HOST_COLLECT	(120 * HZ)

 static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
+
+#define for_each_host(host, pos, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry((host), (pos), (chain), h_hash)
+
+#define for_each_host_safe(host, pos, next, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry_safe((host), (pos), (next), \
+						(chain), h_hash)
+
 static unsigned long		next_gc;
 static int			nrhosts;
 static DEFINE_MUTEX(nlm_host_mutex);
@@ -453,8 +465,7 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
 	 * To avoid processing a host several times, we match the nsmstate.
 	 */
 again:	mutex_lock(&nlm_host_mutex);
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			if (host->h_nsmhandle == nsm
-			 && host->h_nsmstate != info->state) {
-				host->h_nsmstate = info->state;
+	for_each_host(host, pos, chain, nlm_hosts) {
+		if (host->h_nsmhandle == nsm
+		    && host->h_nsmstate != info->state) {
+			host->h_nsmstate = info->state;
@@ -476,7 +487,6 @@ again:	mutex_lock(&nlm_host_mutex);
-				goto again;
-			}
-		}
-	}
+			goto again;
+		}
+	}
 	mutex_unlock(&nlm_host_mutex);
 	nsm_release(nsm);
 }
@@ -497,15 +507,13 @@ nlm_shutdown_hosts(void)

 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts...\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			host->h_expires = jiffies - 1;
-			if (host->h_rpcclnt) {
-				rpc_shutdown_client(host->h_rpcclnt);
-				host->h_rpcclnt = NULL;
-			}
-		}
-	}
+	for_each_host(host, pos, chain, nlm_hosts) {
+		host->h_expires = jiffies - 1;
+		if (host->h_rpcclnt) {
+			rpc_shutdown_client(host->h_rpcclnt);
+			host->h_rpcclnt = NULL;
+		}
+	}

 	/* Then, perform a garbage collection pass */
 	nlm_gc_hosts();
@@ -515,14 +523,12 @@ nlm_shutdown_hosts(void)
 	if (nrhosts) {
 		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
 		dprintk("lockd: %d hosts left:\n", nrhosts);
-		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-			hlist_for_each_entry(host, pos, chain, h_hash) {
-				dprintk("       %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-			}
-		}
-	}
+		for_each_host(host, pos, chain, nlm_hosts) {
+			dprintk("       %s (cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
+		}
+	}
 }

 /*
@@ -538,19 +544,17 @@ nlm_gc_hosts(void)
 	struct nlm_host	*host;

 	dprintk("lockd: host garbage collection\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash)
-			host->h_inuse = 0;
-	}
+	for_each_host(host, pos, chain, nlm_hosts)
+		host->h_inuse = 0;

 	/* Mark all hosts that hold locks, blocks or shares */
 	nlmsvc_mark_resources();

-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
-			if (atomic_read(&host->h_count) || host->h_inuse
-			 || time_before(jiffies, host->h_expires)) {
-				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-				continue;
+	for_each_host_safe(host, pos, next, chain, nlm_hosts) {
+		if (atomic_read(&host->h_count) || host->h_inuse
+		    || time_before(jiffies, host->h_expires)) {
+			dprintk("nlm_gc_hosts skipping %s "
+				"(cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
+			continue;
@@ -561,7 +565,6 @@ nlm_gc_hosts(void)
-			nlm_destroy_host(host);
-			nrhosts--;
-		}
-	}
+		nlm_destroy_host(host);
+		nrhosts--;
+	}

 	next_gc = jiffies + NLM_HOST_COLLECT;
 }
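
The _safe variant is for walks that may drop the current entry, as the garbage-collection pass above does through nlm_destroy_host(): hlist_for_each_entry_safe() stashes the following node in 'next' before running the body, so freeing 'host' cannot corrupt the traversal. A minimal sketch of that pattern (the host_is_busy() predicate is hypothetical shorthand for the h_count/h_inuse/expiry checks in nlm_gc_hosts()):

	struct hlist_head *chain;
	struct hlist_node *pos, *next;
	struct nlm_host *host;

	for_each_host_safe(host, pos, next, chain, nlm_hosts) {
		if (host_is_busy(host))		/* hypothetical: see the checks above */
			continue;
		nlm_destroy_host(host);		/* frees 'host'; 'next' keeps the walk valid */
		nrhosts--;
	}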