Commit 9f62e960 authored by Sunil Mushran's avatar Sunil Mushran Committed by Joel Becker

ocfs2/dlm: dlm_is_lockres_migrateable() returns boolean

Patch cleans up the gunk added by commit 388c4bcb.
dlm_is_lockres_migrateable() now returns 1 if the lock resource is deemed
migrateable and 0 if not.
Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Joel Becker <jlbec@evilplan.org>
parent 10fca35f
...@@ -401,6 +401,18 @@ static inline int dlm_lvb_is_empty(char *lvb) ...@@ -401,6 +401,18 @@ static inline int dlm_lvb_is_empty(char *lvb)
return 1; return 1;
} }
/*
 * Map a lock-resource list index to its human-readable name,
 * for use in log messages.
 */
static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
{
	switch (idx) {
	case DLM_GRANTED_LIST:
		return "granted";
	case DLM_CONVERTING_LIST:
		return "converting";
	case DLM_BLOCKED_LIST:
		return "blocked";
	default:
		return "unknown";
	}
}
static inline struct list_head * static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{ {
......
...@@ -2339,65 +2339,55 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) ...@@ -2339,65 +2339,55 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
dlm_lockres_put(res); dlm_lockres_put(res);
} }
/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0 /*
* if not. If 0, numlocks is set to the number of locks in the lockres. * A migrateable resource is one that is :
* 1. locally mastered, and,
* 2. zero local locks, and,
* 3. one or more non-local locks, or, one or more references
* Returns 1 if yes, 0 if not.
*/ */
static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res, struct dlm_lock_resource *res)
int *numlocks,
int *hasrefs)
{ {
int ret; enum dlm_lockres_list idx;
int i; int nonlocal = 0, node_ref;
int count = 0;
struct list_head *queue; struct list_head *queue;
struct dlm_lock *lock; struct dlm_lock *lock;
u64 cookie;
assert_spin_locked(&res->spinlock); assert_spin_locked(&res->spinlock);
*numlocks = 0; if (res->owner != dlm->node_num)
*hasrefs = 0; return 0;
ret = -EINVAL;
if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
mlog(0, "cannot migrate lockres with unknown owner!\n");
goto leave;
}
if (res->owner != dlm->node_num) {
mlog(0, "cannot migrate lockres this node doesn't own!\n");
goto leave;
}
ret = 0; for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
queue = &res->granted; queue = dlm_list_idx_to_ptr(res, idx);
for (i = 0; i < 3; i++) {
list_for_each_entry(lock, queue, list) { list_for_each_entry(lock, queue, list) {
++count; if (lock->ml.node != dlm->node_num) {
if (lock->ml.node == dlm->node_num) { nonlocal++;
mlog(0, "found a lock owned by this node still " continue;
"on the %s queue! will not migrate this "
"lockres\n", (i == 0 ? "granted" :
(i == 1 ? "converting" :
"blocked")));
ret = -ENOTEMPTY;
goto leave;
} }
cookie = be64_to_cpu(lock->ml.cookie);
mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
"%s list\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(cookie),
dlm_get_lock_cookie_seq(cookie),
dlm_list_in_text(idx));
return 0;
} }
queue++;
} }
*numlocks = count; if (!nonlocal) {
node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); if (node_ref >= O2NM_MAX_NODES)
if (count < O2NM_MAX_NODES) return 0;
*hasrefs = 1; }
mlog(0, "%s: res %.*s, Migrateable, locks %d, refs %d\n", dlm->name, mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
res->lockname.len, res->lockname.name, *numlocks, *hasrefs); res->lockname.name);
leave: return 1;
return ret;
} }
/* /*
...@@ -2416,7 +2406,6 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, ...@@ -2416,7 +2406,6 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
const char *name; const char *name;
unsigned int namelen; unsigned int namelen;
int mle_added = 0; int mle_added = 0;
int numlocks, hasrefs;
int wake = 0; int wake = 0;
if (!dlm_grab(dlm)) if (!dlm_grab(dlm))
...@@ -2427,19 +2416,13 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, ...@@ -2427,19 +2416,13 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target); mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target);
/* /* Ensure this lockres is a proper candidate for migration */
* ensure this lockres is a proper candidate for migration
*/
spin_lock(&res->spinlock); spin_lock(&res->spinlock);
ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs); ret = dlm_is_lockres_migrateable(dlm, res);
if (ret < 0) {
spin_unlock(&res->spinlock);
goto leave;
}
spin_unlock(&res->spinlock); spin_unlock(&res->spinlock);
/* no work to do */ /* No work to do */
if (numlocks == 0 && !hasrefs) if (!ret)
goto leave; goto leave;
/* /*
...@@ -2658,44 +2641,35 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, ...@@ -2658,44 +2641,35 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
dlm_put(dlm); dlm_put(dlm);
mlog(0, "returning %d\n", ret); mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
name, target, ret);
return ret; return ret;
} }
#define DLM_MIGRATION_RETRY_MS 100 #define DLM_MIGRATION_RETRY_MS 100
/* Should be called only after beginning the domain leave process. /*
* Should be called only after beginning the domain leave process.
* There should not be any remaining locks on nonlocal lock resources, * There should not be any remaining locks on nonlocal lock resources,
* and there should be no local locks left on locally mastered resources. * and there should be no local locks left on locally mastered resources.
* *
* Called with the dlm spinlock held, may drop it to do migration, but * Called with the dlm spinlock held, may drop it to do migration, but
* will re-acquire before exit. * will re-acquire before exit.
* *
* Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */ * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
*/
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{ {
int ret; int mig, ret;
int lock_dropped = 0; int lock_dropped = 0;
int numlocks, hasrefs;
spin_lock(&res->spinlock); assert_spin_locked(&dlm->spinlock);
if (res->owner != dlm->node_num) {
if (!__dlm_lockres_unused(res)) {
mlog(ML_ERROR, "%s:%.*s: this node is not master, "
"trying to free this but locks remain\n",
dlm->name, res->lockname.len, res->lockname.name);
}
spin_unlock(&res->spinlock);
goto leave;
}
/* No need to migrate a lockres having no locks */ spin_lock(&res->spinlock);
ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs); mig = dlm_is_lockres_migrateable(dlm, res);
if (ret >= 0 && numlocks == 0 && !hasrefs) {
spin_unlock(&res->spinlock);
goto leave;
}
spin_unlock(&res->spinlock); spin_unlock(&res->spinlock);
if (!mig)
goto leave;
/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
spin_unlock(&dlm->spinlock); spin_unlock(&dlm->spinlock);
...@@ -2704,15 +2678,8 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) ...@@ -2704,15 +2678,8 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES); ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
if (ret >= 0) if (ret >= 0)
break; break;
if (ret == -ENOTEMPTY) { mlog(0, "%s: res %.*s, Migrate failed, retrying\n", dlm->name,
mlog(ML_ERROR, "lockres %.*s still has local locks!\n", res->lockname.len, res->lockname.name);
res->lockname.len, res->lockname.name);
BUG();
}
mlog(0, "lockres %.*s: migrate failed, "
"retrying\n", res->lockname.len,
res->lockname.name);
msleep(DLM_MIGRATION_RETRY_MS); msleep(DLM_MIGRATION_RETRY_MS);
} }
spin_lock(&dlm->spinlock); spin_lock(&dlm->spinlock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment