Commit 6070e0c1 authored by Yan, Zheng, committed by Sage Weil

ceph: don't early drop Fw cap

ceph_aio_write() has an optimization that marks the CEPH_CAP_FILE_WR
cap dirty before data is copied to the page cache and the inode size
is updated. The optimization avoids slow cap revocation caused by
balance_dirty_pages(), but introduces an inode size update race: if
ceph_check_caps() flushes the dirty cap before the inode size is
updated, the MDS can miss the new inode size. So just remove the
optimization.
Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Reviewed-by: Greg Farnum <greg@inktank.com>
parent 7971bd92
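
To make the race concrete, here is an illustrative timeline (a simplified sketch based only on the description above, not the exact kernel call paths) of how the old ordering could lose an inode size update:

/*
 * Illustrative interleaving (simplified; assumes the old buffered-write
 * path in ceph_aio_write() shown in the diff below):
 *
 *   ceph_aio_write()                        cap flushing context
 *   ----------------                        --------------------
 *   __ceph_mark_dirty_caps(ci, Fw)
 *   ceph_put_cap_refs(ci, got)
 *                                           ceph_check_caps() flushes the
 *                                           dirty Fw cap; the MDS records
 *                                           the old i_size
 *   generic_file_aio_write() copies data
 *   to the page cache and updates i_size
 *   -> the new i_size never reaches the MDS
 */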
@@ -724,9 +724,12 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
         if (ceph_snap(inode) != CEPH_NOSNAP)
                 return -EROFS;
 
+        sb_start_write(inode->i_sb);
 retry_snap:
-        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
-                return -ENOSPC;
+        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
+                ret = -ENOSPC;
+                goto out;
+        }
         __ceph_do_pending_vmtruncate(inode);
         dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
              inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
@@ -750,29 +753,10 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
                 ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
                         &iocb->ki_pos);
         } else {
-                /*
-                 * buffered write; drop Fw early to avoid slow
-                 * revocation if we get stuck on balance_dirty_pages
-                 */
-                int dirty;
-
-                spin_lock(&ci->i_ceph_lock);
-                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-                spin_unlock(&ci->i_ceph_lock);
-                ceph_put_cap_refs(ci, got);
-
-                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
-                if ((ret >= 0 || ret == -EIOCBQUEUED) &&
-                    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
-                     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
-                        err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
-                        if (err < 0)
-                                ret = err;
-                }
-
-                if (dirty)
-                        __mark_inode_dirty(inode, dirty);
-                goto out;
+                mutex_lock(&inode->i_mutex);
+                ret = __generic_file_aio_write(iocb, iov, nr_segs,
+                                               &iocb->ki_pos);
+                mutex_unlock(&inode->i_mutex);
         }
 
         if (ret >= 0) {
@@ -790,12 +774,20 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
              ceph_cap_string(got));
         ceph_put_cap_refs(ci, got);
 
+        if (ret >= 0 &&
+            ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
+             ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
+                err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
+                if (err < 0)
+                        ret = err;
+        }
 out:
         if (ret == -EOLDSNAPC) {
                 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
                      inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
                 goto retry_snap;
         }
+        sb_end_write(inode->i_sb);
 
         return ret;
 }