Commit f2145eae authored by Bruce Korb, committed by Greg Kroah-Hartman

staging/lustre/dlmlock: compress out unused space

* lustre/include/lustre_dlm.h: Remove all bit fields and the unused
  weighing callback procedure.  Respell LDLM_AST_DISCARD_DATA as
  LDLM_FL_AST_DISCARD_DATA to match the other flags.
* .gitignore: ignore emacs temporary files
* autogen.sh: rebuild the lock bits, if autogen is available.
* contrib/bit-masks/lustre_dlm_flags.def: define the ldlm_lock flags
* contrib/bit-masks/lustre_dlm_flags.tpl: template for emitting text
* contrib/bit-masks/Makefile: construct the .c and .h files
  The .c file is for constructing a crash extension and is not
  preserved.
* contrib/bit-masks/.gitignore: ignore built products
* lustre/contrib/wireshark/packet-lustre.c: use built files instead
  of local versions of the defines.

In the rest of the modified sources, replace flag field references
with bit mask references.
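
For illustration only, the shape of that conversion is sketched below.  The
struct layouts are trimmed and the mask values are placeholders; the real
values are generated from contrib/bit-masks/lustre_dlm_flags.def.

#include <stdio.h>

typedef unsigned long long __u64;   /* stand-in for the kernel type */

/* Before: one single-bit field per lock state. */
struct ldlm_lock_old {
        unsigned int l_destroyed:1;
        unsigned int l_ns_srv:1;
        unsigned int l_res_locked:1;
};

/* After: each state is a bit in the 64-bit l_flags word.
 * Placeholder values -- the generated header defines the real ones. */
#define LDLM_FL_DESTROYED  0x0000000000000008ULL
#define LDLM_FL_NS_SRV     0x0000000000000010ULL
#define LDLM_FL_RES_LOCKED 0x0000000000000020ULL

struct ldlm_lock_new {
        __u64 l_flags;
};

int main(void)
{
        struct ldlm_lock_new lock = { 0 };

        lock.l_flags |= LDLM_FL_DESTROYED;       /* was: lock.l_destroyed = 1; */

        if (lock.l_flags & LDLM_FL_DESTROYED)    /* was: if (lock.l_destroyed) */
                printf("lock is destroyed\n");

        lock.l_flags &= ~LDLM_FL_RES_LOCKED;     /* was: lock.l_res_locked = 0; */
        return 0;
}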

* lustre/osc/osc_lock.c: removed osc_lock_weigh, too

Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-2771
Lustre-change: http://review.whamcloud.com/5312
Signed-off-by: Bruce Korb <bruce_korb@xyratex.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Reviewed-by: Keith Mannthey <Keith.Mannthey@intel.com>
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Reviewed-by: <bruce.korb@gmail.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent df9fcbeb
@@ -51,12 +51,12 @@
 struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
 {
         /* on server-side resource of lock doesn't change */
-        if (!lock->l_ns_srv)
+        if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
                 spin_lock(&lock->l_lock);
         lock_res(lock->l_resource);
-        lock->l_res_locked = 1;
+        lock->l_flags |= LDLM_FL_RES_LOCKED;
         return lock->l_resource;
 }
 EXPORT_SYMBOL(lock_res_and_lock);
@@ -67,10 +67,10 @@ EXPORT_SYMBOL(lock_res_and_lock);
 void unlock_res_and_lock(struct ldlm_lock *lock)
 {
         /* on server-side resource of lock doesn't change */
-        lock->l_res_locked = 0;
+        lock->l_flags &= ~LDLM_FL_RES_LOCKED;
         unlock_res(lock->l_resource);
-        if (!lock->l_ns_srv)
+        if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
                 spin_unlock(&lock->l_lock);
 }
 EXPORT_SYMBOL(unlock_res_and_lock);
@@ -639,7 +639,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 granted:
         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
-        if (lock->l_destroyed) {
+        if (lock->l_flags & LDLM_FL_DESTROYED) {
                 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                 RETURN(0);
         }
......
@@ -199,7 +199,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
                            "final lock_put on destroyed lock, freeing it.");
                 res = lock->l_resource;
-                LASSERT(lock->l_destroyed);
+                LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
                 LASSERT(list_empty(&lock->l_res_link));
                 LASSERT(list_empty(&lock->l_pending_chain));
@@ -254,7 +254,7 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
         int rc;
         ENTRY;
-        if (lock->l_ns_srv) {
+        if (lock->l_flags & LDLM_FL_NS_SRV) {
                 LASSERT(list_empty(&lock->l_lru));
                 RETURN(0);
         }
@@ -305,7 +305,7 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
         struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
         ENTRY;
-        if (lock->l_ns_srv) {
+        if (lock->l_flags & LDLM_FL_NS_SRV) {
                 LASSERT(list_empty(&lock->l_lru));
                 EXIT;
                 return;
@@ -353,12 +353,12 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
                 LBUG();
         }
-        if (lock->l_destroyed) {
+        if (lock->l_flags & LDLM_FL_DESTROYED) {
                 LASSERT(list_empty(&lock->l_lru));
                 EXIT;
                 return 0;
         }
-        lock->l_destroyed = 1;
+        lock->l_flags |= LDLM_FL_DESTROYED;
         if (lock->l_export && lock->l_export->exp_lock_hash) {
                 /* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -596,7 +596,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                 /* It's unlikely but possible that someone marked the lock as
                  * destroyed after we did handle2object on it */
-                if (flags == 0 && !lock->l_destroyed) {
+                if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
                         lu_ref_add(&lock->l_reference, "handle", current);
                         RETURN(lock);
                 }
@@ -606,7 +606,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
         LASSERT(lock->l_resource != NULL);
         lu_ref_add_atomic(&lock->l_reference, "handle", current);
-        if (unlikely(lock->l_destroyed)) {
+        if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
                 unlock_res_and_lock(lock);
                 CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                 LDLM_LOCK_PUT(lock);
@@ -695,7 +695,7 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                 lock->l_flags |= LDLM_FL_AST_SENT;
                 /* If the enqueuing client said so, tell the AST recipient to
                  * discard dirty data, rather than writing back. */
-                if (new->l_flags & LDLM_AST_DISCARD_DATA)
+                if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
                         lock->l_flags |= LDLM_FL_DISCARD_DATA;
                 LASSERT(list_empty(&lock->l_bl_ast));
                 list_add(&lock->l_bl_ast, work_list);
@@ -873,7 +873,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
             (lock->l_flags & LDLM_FL_CBPENDING)) {
                 /* If we received a blocked AST and this was the last reference,
                  * run the callback. */
-                if (lock->l_ns_srv && lock->l_export)
+                if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
                         CERROR("FL_CBPENDING set on non-local lock--just a "
                                "warning\n");
@@ -1069,7 +1069,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
         ldlm_resource_dump(D_INFO, res);
         LDLM_DEBUG(lock, "About to add lock:");
-        if (lock->l_destroyed) {
+        if (lock->l_flags & LDLM_FL_DESTROYED) {
                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                 return;
         }
@@ -1203,9 +1203,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
                                           policy->l_inodebits.bits))
                         continue;
-                if (!unref &&
-                    (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
-                     lock->l_failed))
+                if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
                         continue;
                 if ((flags & LDLM_FL_LOCAL_ONLY) &&
@@ -1227,8 +1225,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
 void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
 {
-        if (!lock->l_failed) {
-                lock->l_failed = 1;
+        if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
+                lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
                 wake_up_all(&lock->l_waitq);
         }
 }
@@ -1352,6 +1350,8 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                 ldlm_lock2handle(lock, lockh);
                 if ((flags & LDLM_FL_LVB_READY) &&
                     (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+                        __u64 wait_flags = LDLM_FL_LVB_READY |
+                                LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
                         struct l_wait_info lwi;
                         if (lock->l_completion_ast) {
                                 int err = lock->l_completion_ast(lock,
@@ -1373,8 +1373,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                         /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
                         l_wait_event(lock->l_waitq,
-                                     lock->l_flags & LDLM_FL_LVB_READY ||
-                                     lock->l_destroyed || lock->l_failed,
+                                     lock->l_flags & wait_flags,
                                      &lwi);
                         if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
                                 if (flags & LDLM_FL_TEST_LOCK)
@@ -1431,8 +1430,7 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
         lock = ldlm_handle2lock(lockh);
         if (lock != NULL) {
                 lock_res_and_lock(lock);
-                if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
-                    lock->l_failed)
+                if (lock->l_flags & LDLM_FL_GONE_MASK)
                         GOTO(out, mode);
                 if (lock->l_flags & LDLM_FL_CBPENDING &&
@@ -1583,12 +1581,12 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
         lock->l_req_mode = mode;
         lock->l_ast_data = data;
         lock->l_pid = current_pid();
-        lock->l_ns_srv = !!ns_is_server(ns);
+        if (ns_is_server(ns))
+                lock->l_flags |= LDLM_FL_NS_SRV;
         if (cbs) {
                 lock->l_blocking_ast = cbs->lcs_blocking;
                 lock->l_completion_ast = cbs->lcs_completion;
                 lock->l_glimpse_ast = cbs->lcs_glimpse;
-                lock->l_weigh_ast = cbs->lcs_weigh;
         }
         lock->l_tree_node = NULL;
@@ -1693,7 +1691,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
         /* Some flags from the enqueue want to make it into the AST, via the
          * lock's l_flags. */
-        lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;
+        lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
         /* This distinction between local lock trees is very important; a client
          * namespace only has information about locks taken by that client, and
@@ -2046,15 +2044,15 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
                 LBUG();
         }
-        if (lock->l_waited)
+        if (lock->l_flags & LDLM_FL_WAITED)
                 ldlm_del_waiting_lock(lock);
         /* Releases cancel callback. */
         ldlm_cancel_callback(lock);
         /* Yes, second time, just in case it was added again while we were
-           running with no res lock in ldlm_cancel_callback */
-        if (lock->l_waited)
+         * running with no res lock in ldlm_cancel_callback */
+        if (lock->l_flags & LDLM_FL_WAITED)
                 ldlm_del_waiting_lock(lock);
         ldlm_resource_unlink_lock(lock);
......
@@ -198,7 +198,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
                         schedule_timeout_and_set_state(
                                 TASK_INTERRUPTIBLE, to);
                         if (lock->l_granted_mode == lock->l_req_mode ||
-                            lock->l_destroyed)
+                            lock->l_flags & LDLM_FL_DESTROYED)
                                 break;
                 }
         }
@@ -238,7 +238,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
         }
         lock_res_and_lock(lock);
-        if (lock->l_destroyed ||
+        if ((lock->l_flags & LDLM_FL_DESTROYED) ||
             lock->l_granted_mode == lock->l_req_mode) {
                 /* bug 11300: the lock has already been granted */
                 unlock_res_and_lock(lock);
......
@@ -160,7 +160,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
         long delay;
         int result;
-        if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
+        if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
                 LDLM_DEBUG(lock, "client-side enqueue: destroyed");
                 result = -EIO;
         } else {
@@ -888,9 +888,8 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
         } else {
                 const struct ldlm_callback_suite cbs = {
                         .lcs_completion = einfo->ei_cb_cp,
                         .lcs_blocking   = einfo->ei_cb_bl,
-                        .lcs_glimpse    = einfo->ei_cb_gl,
-                        .lcs_weigh      = einfo->ei_cb_wg
+                        .lcs_glimpse    = einfo->ei_cb_gl
                 };
                 lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
                                         einfo->ei_mode, &cbs, einfo->ei_cbdata,
......
@@ -1283,7 +1283,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
         LDLM_DEBUG(lock, "About to add this lock:\n");
-        if (lock->l_destroyed) {
+        if (lock->l_flags & LDLM_FL_DESTROYED) {
                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                 return;
         }
@@ -1308,7 +1308,7 @@ void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
         ldlm_resource_dump(D_INFO, res);
         LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
-        if (new->l_destroyed) {
+        if (new->l_flags & LDLM_FL_DESTROYED) {
                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
                 goto out;
         }
......
@@ -356,15 +356,12 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
         rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
                            ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
         if (!rc) {
-                struct ldlm_enqueue_info einfo = {.ei_type = LDLM_IBITS,
-                                                  .ei_mode = mode,
-                                                  .ei_cb_bl =
-                                                  ll_md_blocking_ast,
-                                                  .ei_cb_cp =
-                                                  ldlm_completion_ast,
-                                                  .ei_cb_gl = NULL,
-                                                  .ei_cb_wg = NULL,
-                                                  .ei_cbdata = NULL};
+                struct ldlm_enqueue_info einfo = {
+                        .ei_type = LDLM_IBITS,
+                        .ei_mode = mode,
+                        .ei_cb_bl = ll_md_blocking_ast,
+                        .ei_cb_cp = ldlm_completion_ast,
+                };
                 struct lookup_intent it = { .it_op = IT_READDIR };
                 struct ptlrpc_request *request;
                 struct md_op_data *op_data;
......
@@ -2290,9 +2290,11 @@ int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
 {
         struct inode *inode = file->f_dentry->d_inode;
         struct ll_sb_info *sbi = ll_i2sbi(inode);
-        struct ldlm_enqueue_info einfo = { .ei_type = LDLM_FLOCK,
-                                           .ei_cb_cp = ldlm_flock_completion_ast,
-                                           .ei_cbdata = file_lock };
+        struct ldlm_enqueue_info einfo = {
+                .ei_type = LDLM_FLOCK,
+                .ei_cb_cp = ldlm_flock_completion_ast,
+                .ei_cbdata = file_lock,
+        };
         struct md_op_data *op_data;
         struct lustre_handle lockh = {0};
         ldlm_policy_data_t flock = {{0}};
@@ -3116,11 +3118,12 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
         struct lookup_intent it;
         struct lustre_handle lockh;
         ldlm_mode_t mode;
-        struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
-                                           .ei_mode = LCK_CR,
-                                           .ei_cb_bl = ll_md_blocking_ast,
-                                           .ei_cb_cp = ldlm_completion_ast,
-                                           .ei_cbdata = NULL };
+        struct ldlm_enqueue_info einfo = {
+                .ei_type = LDLM_IBITS,
+                .ei_mode = LCK_CR,
+                .ei_cb_bl = ll_md_blocking_ast,
+                .ei_cb_cp = ldlm_completion_ast,
+        };
         int rc;
         ENTRY;
......
@@ -1102,9 +1102,12 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
          * this and use the request from revalidate. In this case, revalidate
          * never dropped its reference, so the refcounts are all OK */
         if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
-                struct ldlm_enqueue_info einfo =
-                        { LDLM_IBITS, it_to_lock_mode(it), cb_blocking,
-                          ldlm_completion_ast, NULL, NULL, NULL };
+                struct ldlm_enqueue_info einfo = {
+                        .ei_type = LDLM_IBITS,
+                        .ei_mode = it_to_lock_mode(it),
+                        .ei_cb_bl = cb_blocking,
+                        .ei_cb_cp = ldlm_completion_ast,
+                };
                 /* For case if upper layer did not alloc fid, do it now. */
                 if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
......
@@ -900,8 +900,12 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
                        struct lustre_handle *lockh)
 {
         struct config_llog_data *cld = (struct config_llog_data *)data;
-        struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
-                        ldlm_completion_ast, NULL, NULL, NULL };
+        struct ldlm_enqueue_info einfo = {
+                .ei_type = type,
+                .ei_mode = mode,
+                .ei_cb_bl = mgc_blocking_ast,
+                .ei_cb_cp = ldlm_completion_ast,
+        };
         struct ptlrpc_request *req;
         int short_limit = cld_is_sptlrpc(cld);
         int rc;
......
@@ -89,35 +89,49 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
  */
 static int osc_lock_invariant(struct osc_lock *ols)
 {
         struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
         struct ldlm_lock *olock = ols->ols_lock;
         int handle_used = lustre_handle_is_used(&ols->ols_handle);
-        return
-                ergo(osc_lock_is_lockless(ols),
-                     ols->ols_locklessable && ols->ols_lock == NULL) ||
-                (ergo(olock != NULL, handle_used) &&
-                 ergo(olock != NULL,
-                      olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
-                 /*
-                  * Check that ->ols_handle and ->ols_lock are consistent, but
-                  * take into account that they are set at the different time.
-                  */
-                 ergo(handle_used,
-                      ergo(lock != NULL && olock != NULL, lock == olock) &&
-                      ergo(lock == NULL, olock == NULL)) &&
-                 ergo(ols->ols_state == OLS_CANCELLED,
-                      olock == NULL && !handle_used) &&
-                 /*
-                  * DLM lock is destroyed only after we have seen cancellation
-                  * ast.
-                  */
-                 ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
-                      !olock->l_destroyed) &&
-                 ergo(ols->ols_state == OLS_GRANTED,
-                      olock != NULL &&
-                      olock->l_req_mode == olock->l_granted_mode &&
-                      ols->ols_hold));
+        if (ergo(osc_lock_is_lockless(ols),
+                 ols->ols_locklessable && ols->ols_lock == NULL))
+                return 1;
+
+        /*
+         * If all the following "ergo"s are true, return 1, otherwise 0
+         */
+        if (! ergo(olock != NULL, handle_used))
+                return 0;
+
+        if (! ergo(olock != NULL,
+                   olock->l_handle.h_cookie == ols->ols_handle.cookie))
+                return 0;
+
+        if (! ergo(handle_used,
+                   ergo(lock != NULL && olock != NULL, lock == olock) &&
+                   ergo(lock == NULL, olock == NULL)))
+                return 0;
+
+        /*
+         * Check that ->ols_handle and ->ols_lock are consistent, but
+         * take into account that they are set at the different time.
+         */
+        if (! ergo(ols->ols_state == OLS_CANCELLED,
+                   olock == NULL && !handle_used))
+                return 0;
+
+        /*
+         * DLM lock is destroyed only after we have seen cancellation
+         * ast.
+         */
+        if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
+                   ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+                return 0;
+
+        if (! ergo(ols->ols_state == OLS_GRANTED,
+                   olock != NULL &&
+                   olock->l_req_mode == olock->l_granted_mode &&
+                   ols->ols_hold))
+                return 0;
+
+        return 1;
 }
 /*****************************************************************************
@@ -261,7 +275,7 @@ static __u64 osc_enq2ldlm_flags(__u32 enqflags)
         if (enqflags & CEF_ASYNC)
                 result |= LDLM_FL_HAS_INTENT;
         if (enqflags & CEF_DISCARD_DATA)
-                result |= LDLM_AST_DISCARD_DATA;
+                result |= LDLM_FL_AST_DISCARD_DATA;
         return result;
 }
@@ -896,55 +910,6 @@ static unsigned long osc_lock_weigh(const struct lu_env *env,
         return cl_object_header(slice->cls_obj)->coh_pages;
 }
-/**
- * Get the weight of dlm lock for early cancellation.
- *
- * XXX: it should return the pages covered by this \a dlmlock.
- */
-static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
-{
-        struct cl_env_nest nest;
-        struct lu_env *env;
-        struct osc_lock *lock;
-        struct cl_lock *cll;
-        unsigned long weight;
-        ENTRY;
-
-        might_sleep();
-        /*
-         * osc_ldlm_weigh_ast has a complex context since it might be called
-         * because of lock canceling, or from user's input. We have to make
-         * a new environment for it. Probably it is implementation safe to use
-         * the upper context because cl_lock_put don't modify environment
-         * variables. But in case of ..
-         */
-        env = cl_env_nested_get(&nest);
-        if (IS_ERR(env))
-                /* Mostly because lack of memory, tend to eliminate this lock*/
-                RETURN(0);
-
-        LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
-        lock = osc_ast_data_get(dlmlock);
-        if (lock == NULL) {
-                /* cl_lock was destroyed because of memory pressure.
-                 * It is much reasonable to assign this type of lock
-                 * a lower cost.
-                 */
-                GOTO(out, weight = 0);
-        }
-
-        cll = lock->ols_cl.cls_lock;
-        cl_lock_mutex_get(env, cll);
-        weight = cl_lock_weigh(env, cll);
-        cl_lock_mutex_put(env, cll);
-        osc_ast_data_put(env, lock);
-        EXIT;
-
-out:
-        cl_env_nested_put(&nest, env);
-        return weight;
-}
-
 static void osc_lock_build_einfo(const struct lu_env *env,
                                  const struct cl_lock *clock,
                                  struct osc_lock *lock,
@@ -966,7 +931,6 @@ static void osc_lock_build_einfo(const struct lu_env *env,
         einfo->ei_cb_bl = osc_ldlm_blocking_ast;
         einfo->ei_cb_cp = osc_ldlm_completion_ast;
         einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
-        einfo->ei_cb_wg = osc_ldlm_weigh_ast;
         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
 }
......