Commit b4556396 authored by Sage Weil's avatar Sage Weil

ceph: fix race between aborted requests and fill_trace

When we abort requests we need to prevent fill_trace et al from doing
anything that relies on locks held by the VFS caller.  This fixes a race
between the reply handler and the abort code, ensuring that we continue
holding the dir mutex until the reply handler completes.
Signed-off-by: Sage Weil <sage@newdream.net>
parent e1518c7c
...@@ -1181,6 +1181,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) ...@@ -1181,6 +1181,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
if (!req) if (!req)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
mutex_init(&req->r_fill_mutex);
req->r_started = jiffies; req->r_started = jiffies;
req->r_resend_mds = -1; req->r_resend_mds = -1;
INIT_LIST_HEAD(&req->r_unsafe_dir_item); INIT_LIST_HEAD(&req->r_unsafe_dir_item);
...@@ -1715,8 +1716,16 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, ...@@ -1715,8 +1716,16 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
err = le32_to_cpu(req->r_reply_info.head->result); err = le32_to_cpu(req->r_reply_info.head->result);
} else if (err < 0) { } else if (err < 0) {
dout("aborted request %lld with %d\n", req->r_tid, err); dout("aborted request %lld with %d\n", req->r_tid, err);
/*
* ensure we aren't running concurrently with
* ceph_fill_trace or ceph_readdir_prepopulate, which
* rely on locks (dir mutex) held by our caller.
*/
mutex_lock(&req->r_fill_mutex);
req->r_err = err; req->r_err = err;
req->r_aborted = true; req->r_aborted = true;
mutex_unlock(&req->r_fill_mutex);
if (req->r_locked_dir && if (req->r_locked_dir &&
(req->r_op & CEPH_MDS_OP_WRITE)) { (req->r_op & CEPH_MDS_OP_WRITE)) {
...@@ -1861,12 +1870,14 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) ...@@ -1861,12 +1870,14 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
} }
/* insert trace into our cache */ /* insert trace into our cache */
mutex_lock(&req->r_fill_mutex);
err = ceph_fill_trace(mdsc->client->sb, req, req->r_session); err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
if (err == 0) { if (err == 0) {
if (result == 0 && rinfo->dir_nr) if (result == 0 && rinfo->dir_nr)
ceph_readdir_prepopulate(req, req->r_session); ceph_readdir_prepopulate(req, req->r_session);
ceph_unreserve_caps(&req->r_caps_reservation); ceph_unreserve_caps(&req->r_caps_reservation);
} }
mutex_unlock(&req->r_fill_mutex);
up_read(&mdsc->snap_rwsem); up_read(&mdsc->snap_rwsem);
out_err: out_err:
......
...@@ -165,6 +165,8 @@ struct ceph_mds_request { ...@@ -165,6 +165,8 @@ struct ceph_mds_request {
struct inode *r_locked_dir; /* dir (if any) i_mutex locked by vfs */ struct inode *r_locked_dir; /* dir (if any) i_mutex locked by vfs */
struct inode *r_target_inode; /* resulting inode */ struct inode *r_target_inode; /* resulting inode */
struct mutex r_fill_mutex;
union ceph_mds_request_args r_args; union ceph_mds_request_args r_args;
int r_fmode; /* file mode, if expecting cap */ int r_fmode; /* file mode, if expecting cap */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment