Commit 2b0a80b0 authored by Linus Torvalds

Merge tag 'ceph-for-5.1-rc1' of git://github.com/ceph/ceph-client

Pull ceph updates from Ilya Dryomov:
 "The highlights are:

   - rbd will now ignore discards that aren't aligned and big enough to
     actually free up some space (myself). This is controlled by the new
     alloc_size map option and can be disabled if needed.

   - support for rbd deep-flatten feature (myself). Deep-flatten allows
     "rbd flatten" to fully disconnect the clone image and its snapshots
     from the parent and make the parent snapshot removable.

   - a new round of cap handling improvements (Zheng Yan). The kernel
     client should now be much more prompt about releasing its caps and
     it is possible to put a limit on the number of caps held.

   - support for getting ceph.dir.pin extended attribute (Zheng Yan)"

* tag 'ceph-for-5.1-rc1' of git://github.com/ceph/ceph-client: (26 commits)
  Documentation: modern versions of ceph are not backed by btrfs
  rbd: advertise support for RBD_FEATURE_DEEP_FLATTEN
  rbd: whole-object write and zeroout should copyup when snapshots exist
  rbd: copyup with an empty snapshot context (aka deep-copyup)
  rbd: introduce rbd_obj_issue_copyup_ops()
  rbd: stop copying num_osd_ops in rbd_obj_issue_copyup()
  rbd: factor out __rbd_osd_req_create()
  rbd: clear ->xferred on error from rbd_obj_issue_copyup()
  rbd: remove experimental designation from kernel layering
  ceph: add mount option to limit caps count
  ceph: periodically trim stale dentries
  ceph: delete stale dentry when last reference is dropped
  ceph: remove dentry_lru file from debugfs
  ceph: touch existing cap when handling reply
  ceph: pass inclusive lend parameter to filemap_write_and_wait_range()
  rbd: round off and ignore discards that are too small
  rbd: handle DISCARD and WRITE_ZEROES separately
  rbd: get rid of obj_req->obj_request_count
  libceph: use struct_size() for kmalloc() in crush_decode()
  ceph: send cap releases more aggressively
  ...
parents 92825b02 d11ae8e0
@@ -22,9 +22,7 @@ In contrast to cluster filesystems like GFS, OCFS2, and GPFS that rely
 on symmetric access by all clients to shared block devices, Ceph
 separates data and metadata management into independent server
 clusters, similar to Lustre. Unlike Lustre, however, metadata and
-storage nodes run entirely as user space daemons. Storage nodes
-utilize btrfs to store data objects, leveraging its advanced features
-(checksumming, metadata replication, etc.). File data is striped
+storage nodes run entirely as user space daemons. File data is striped
 across storage nodes in large chunks to distribute workload and
 facilitate high throughputs. When storage nodes fail, data is
 re-replicated in a distributed fashion by the storage nodes themselves
@@ -118,6 +116,10 @@ Mount Options
         of a non-responsive Ceph file system. The default is 30
         seconds.
 
+  caps_max=X
+        Specify the maximum number of caps to hold. Unused caps are
+        released when the number of caps exceeds the limit. The default
+        is 0 (no limit).
+
   rbytes
         When stat() is called on a directory, set st_size to 'rbytes',
         the summation of file sizes over all files nested beneath that
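The option is consumed at mount time like any other ceph mount option. A minimal sketch of exercising it from C via mount(2); the monitor address, mount point, and option values here are illustrative examples, not defaults:

    /* illustrative only: mount CephFS with a cap limit of ~64k */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            const char *opts = "name=admin,caps_max=65536";

            if (mount("192.168.0.1:6789:/", "/mnt/ceph", "ceph", 0, opts)) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }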
@@ -160,11 +162,11 @@ More Information
 ================
 
 For more information on Ceph, see the home page at
-        http://ceph.newdream.net/
+        https://ceph.com/
 
 The Linux kernel client source tree is available at
-        git://ceph.newdream.net/git/ceph-client.git
+        https://github.com/ceph/ceph-client.git
         git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 
 and the source for the full system is at
-        git://ceph.newdream.net/git/ceph.git
+        https://github.com/ceph/ceph.git
This diff is collapsed.
@@ -148,11 +148,17 @@ void ceph_caps_finalize(struct ceph_mds_client *mdsc)
         spin_unlock(&mdsc->caps_list_lock);
 }
 
-void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
+void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
+                              struct ceph_mount_options *fsopt)
 {
         spin_lock(&mdsc->caps_list_lock);
-        mdsc->caps_min_count += delta;
-        BUG_ON(mdsc->caps_min_count < 0);
+        mdsc->caps_min_count = fsopt->max_readdir;
+        if (mdsc->caps_min_count < 1024)
+                mdsc->caps_min_count = 1024;
+        mdsc->caps_use_max = fsopt->caps_max;
+        if (mdsc->caps_use_max > 0 &&
+            mdsc->caps_use_max < mdsc->caps_min_count)
+                mdsc->caps_use_max = mdsc->caps_min_count;
         spin_unlock(&mdsc->caps_list_lock);
 }
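The net effect of the new helper: the floor caps_min_count becomes max(max_readdir, 1024), and a non-zero caps_max is raised to at least that floor so the limit can never undercut the reserve. A tiny userspace demo of the same clamping, with made-up input values:

    /* demo of the clamping above; the inputs are arbitrary examples */
    #include <stdio.h>

    int main(void)
    {
            int max_readdir = 1024;         /* fsopt->max_readdir */
            int caps_max = 100;             /* caps_max=100: too small */
            int min_count = max_readdir < 1024 ? 1024 : max_readdir;
            int use_max = caps_max;

            if (use_max > 0 && use_max < min_count)
                    use_max = min_count;    /* effective limit: 1024 */

            printf("caps_min_count=%d caps_use_max=%d\n", min_count, use_max);
            return 0;
    }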
@@ -272,6 +278,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
         if (!err) {
                 BUG_ON(have + alloc != need);
                 ctx->count = need;
+                ctx->used = 0;
         }
 
         spin_lock(&mdsc->caps_list_lock);
@@ -295,13 +302,24 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
 }
 
 void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
                          struct ceph_cap_reservation *ctx)
 {
+        bool reclaim = false;
+
+        if (!ctx->count)
+                return;
+
         dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
         spin_lock(&mdsc->caps_list_lock);
         __ceph_unreserve_caps(mdsc, ctx->count);
         ctx->count = 0;
+
+        if (mdsc->caps_use_max > 0 &&
+            mdsc->caps_use_count > mdsc->caps_use_max)
+                reclaim = true;
+
         spin_unlock(&mdsc->caps_list_lock);
+
+        if (reclaim)
+                ceph_reclaim_caps_nr(mdsc, ctx->used);
 }
 
 struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
@@ -346,6 +364,7 @@ struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
                 BUG_ON(list_empty(&mdsc->caps_list));
                 ctx->count--;
+                ctx->used++;
                 mdsc->caps_reserve_count--;
                 mdsc->caps_use_count++;
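Together these hunks let ctx->used record how many caps a reservation actually consumed, and ceph_unreserve_caps() only decides under caps_list_lock whether the limit was exceeded, deferring the reclaim call until the spinlock is dropped. A self-contained userspace sketch of that "decide under the lock, act outside it" pattern; every name here is illustrative, not kernel API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct counter {
            pthread_mutex_t lock;
            int in_use;
            int limit;                      /* 0 means unlimited */
    };

    static void reclaim(struct counter *c, int nr)
    {
            /* stand-in for ceph_reclaim_caps_nr(): too heavy to run locked */
            printf("reclaiming, %d releases pending\n", nr);
    }

    void release_and_maybe_reclaim(struct counter *c, int nr)
    {
            bool over_limit;

            pthread_mutex_lock(&c->lock);
            c->in_use -= nr;
            /* only record the decision while the lock is held... */
            over_limit = (c->limit > 0 && c->in_use > c->limit);
            pthread_mutex_unlock(&c->lock);

            /* ...and do the heavier work with the lock dropped */
            if (over_limit)
                    reclaim(c, nr);
    }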
@@ -500,12 +519,12 @@ static void __insert_cap_node(struct ceph_inode_info *ci,
 static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
                                struct ceph_inode_info *ci)
 {
-        struct ceph_mount_options *ma = mdsc->fsc->mount_options;
+        struct ceph_mount_options *opt = mdsc->fsc->mount_options;
         ci->i_hold_caps_min = round_jiffies(jiffies +
-                                            ma->caps_wanted_delay_min * HZ);
+                                            opt->caps_wanted_delay_min * HZ);
         ci->i_hold_caps_max = round_jiffies(jiffies +
-                                            ma->caps_wanted_delay_max * HZ);
+                                            opt->caps_wanted_delay_max * HZ);
         dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
              ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
 }
@@ -657,6 +676,10 @@ void ceph_add_cap(struct inode *inode,
                 session->s_nr_caps++;
                 spin_unlock(&session->s_cap_lock);
         } else {
+                spin_lock(&session->s_cap_lock);
+                list_move_tail(&cap->session_caps, &session->s_caps);
+                spin_unlock(&session->s_cap_lock);
+
                 if (cap->cap_gen < session->s_cap_gen)
                         cap->issued = cap->implemented = CEPH_CAP_PIN;
@@ -1081,9 +1104,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
             (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
                 cap->queue_release = 1;
                 if (removed) {
-                        list_add_tail(&cap->session_caps,
-                                      &session->s_cap_releases);
-                        session->s_num_cap_releases++;
+                        __ceph_queue_cap_release(session, cap);
                         removed = 0;
                 }
         } else {
@@ -1245,7 +1266,7 @@ static int send_cap_msg(struct cap_msg_args *arg)
  * Queue cap releases when an inode is dropped from our cache. Since
  * inode is about to be destroyed, there is no need for i_ceph_lock.
  */
-void ceph_queue_caps_release(struct inode *inode)
+void __ceph_remove_caps(struct inode *inode)
 {
         struct ceph_inode_info *ci = ceph_inode(inode);
         struct rb_node *p;
@@ -2393,6 +2414,12 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
                 if ((cap->issued & ci->i_flushing_caps) !=
                     ci->i_flushing_caps) {
                         ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+                        /* encode_caps_cb() also will reset these sequence
+                         * numbers. make sure sequence numbers in cap flush
+                         * message match later reconnect message */
+                        cap->seq = 0;
+                        cap->issue_seq = 0;
+                        cap->mseq = 0;
                         __kick_flushing_caps(mdsc, session, ci,
                                              oldest_flush_tid);
                 } else {
@@ -3880,12 +3907,10 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                         cap->seq = seq;
                         cap->issue_seq = seq;
                         spin_lock(&session->s_cap_lock);
-                        list_add_tail(&cap->session_caps,
-                                      &session->s_cap_releases);
-                        session->s_num_cap_releases++;
+                        __ceph_queue_cap_release(session, cap);
                         spin_unlock(&session->s_cap_lock);
                 }
-                goto flush_cap_releases;
+                goto done;
         }
 
         /* these will work even if we don't have a cap yet */
@@ -3955,7 +3980,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                         ceph_cap_op_name(op));
         }
 
-        goto done;
+done:
+        mutex_unlock(&session->s_mutex);
+done_unlocked:
+        iput(inode);
+        ceph_put_string(extra_info.pool_ns);
+        return;
 
 flush_cap_releases:
         /*
@@ -3963,14 +3993,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
          * along for the mds (who clearly thinks we still have this
          * cap).
          */
-        ceph_send_cap_releases(mdsc, session);
-        goto done;
-
-done:
-        mutex_unlock(&session->s_mutex);
-done_unlocked:
-        iput(inode);
-        ceph_put_string(extra_info.pool_ns);
-        return;
+        ceph_flush_cap_releases(mdsc, session);
+        goto done;
 
 bad:
         pr_err("ceph_handle_caps: corrupt message\n");
...
@@ -139,23 +139,6 @@ static int caps_show(struct seq_file *s, void *p)
         return 0;
 }
 
-static int dentry_lru_show(struct seq_file *s, void *ptr)
-{
-        struct ceph_fs_client *fsc = s->private;
-        struct ceph_mds_client *mdsc = fsc->mdsc;
-        struct ceph_dentry_info *di;
-
-        spin_lock(&mdsc->dentry_lru_lock);
-        list_for_each_entry(di, &mdsc->dentry_lru, lru) {
-                struct dentry *dentry = di->dentry;
-                seq_printf(s, "%p %p\t%pd\n",
-                           di, dentry, dentry);
-        }
-        spin_unlock(&mdsc->dentry_lru_lock);
-        return 0;
-}
-
 static int mds_sessions_show(struct seq_file *s, void *ptr)
 {
         struct ceph_fs_client *fsc = s->private;
@@ -195,7 +178,6 @@ static int mds_sessions_show(struct seq_file *s, void *ptr)
 CEPH_DEFINE_SHOW_FUNC(mdsmap_show)
 CEPH_DEFINE_SHOW_FUNC(mdsc_show)
 CEPH_DEFINE_SHOW_FUNC(caps_show)
-CEPH_DEFINE_SHOW_FUNC(dentry_lru_show)
 CEPH_DEFINE_SHOW_FUNC(mds_sessions_show)
@@ -231,7 +213,6 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
         debugfs_remove(fsc->debugfs_mds_sessions);
         debugfs_remove(fsc->debugfs_caps);
         debugfs_remove(fsc->debugfs_mdsc);
-        debugfs_remove(fsc->debugfs_dentry_lru);
 }
 
 int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
@@ -291,14 +272,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
         if (!fsc->debugfs_caps)
                 goto out;
 
-        fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
-                                                      0400,
-                                                      fsc->client->debugfs_dir,
-                                                      fsc,
-                                                      &dentry_lru_show_fops);
-        if (!fsc->debugfs_dentry_lru)
-                goto out;
-
         return 0;
 
 out:
...
This diff is collapsed.
@@ -590,7 +590,8 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
          * but it will at least behave sensibly when they are
          * in sequence.
          */
-        ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len);
+        ret = filemap_write_and_wait_range(inode->i_mapping,
+                                           off, off + len - 1);
         if (ret < 0)
                 return ret;
@@ -929,14 +930,15 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
               (write ? "write" : "read"), file, pos, (unsigned)count,
               snapc, snapc->seq);
 
-        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
+        ret = filemap_write_and_wait_range(inode->i_mapping,
+                                           pos, pos + count - 1);
         if (ret < 0)
                 return ret;
 
         if (write) {
                 int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
                                         pos >> PAGE_SHIFT,
-                                        (pos + count) >> PAGE_SHIFT);
+                                        (pos + count - 1) >> PAGE_SHIFT);
                 if (ret2 < 0)
                         dout("invalidate_inode_pages2_range returned %d\n", ret2);
@@ -1132,13 +1134,14 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
         dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
              file, pos, (unsigned)count, snapc, snapc->seq);
 
-        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
+        ret = filemap_write_and_wait_range(inode->i_mapping,
+                                           pos, pos + count - 1);
         if (ret < 0)
                 return ret;
 
         ret = invalidate_inode_pages2_range(inode->i_mapping,
                                             pos >> PAGE_SHIFT,
-                                            (pos + count) >> PAGE_SHIFT);
+                                            (pos + count - 1) >> PAGE_SHIFT);
         if (ret < 0)
                 dout("invalidate_inode_pages2_range returned %d\n", ret);
...
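The lend parameter of filemap_write_and_wait_range() (and the end index of invalidate_inode_pages2_range()) is inclusive: for count bytes starting at pos the last byte is pos + count - 1, so passing pos + count flushed and invalidated one byte, and potentially one page, too many. A back-of-the-envelope check of the arithmetic:

    /* inclusive-range arithmetic behind the fix above */
    #include <stdio.h>

    int main(void)
    {
            long long pos = 4096, count = 512;
            long long last = pos + count - 1;       /* inclusive end */

            printf("range [%lld, %lld] covers %lld bytes\n",
                   pos, last, last - pos + 1);
            /* with 4k pages, (pos + count) >> 12 can name a page the
             * write never touched when pos + count is page-aligned */
            return 0;
    }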
@@ -497,7 +497,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
         ci->i_wrbuffer_ref = 0;
         ci->i_wrbuffer_ref_head = 0;
         atomic_set(&ci->i_filelock_ref, 0);
-        atomic_set(&ci->i_shared_gen, 0);
+        atomic_set(&ci->i_shared_gen, 1);
         ci->i_rdcache_gen = 0;
         ci->i_rdcache_revoking = 0;
@@ -537,7 +537,7 @@ void ceph_destroy_inode(struct inode *inode)
         ceph_fscache_unregister_inode_cookie(ci);
 
-        ceph_queue_caps_release(inode);
+        __ceph_remove_caps(inode);
 
         if (__ceph_has_any_quota(ci))
                 ceph_adjust_quota_realms_count(inode, false);
@@ -548,17 +548,22 @@ void ceph_destroy_inode(struct inode *inode)
          */
         if (ci->i_snap_realm) {
                 struct ceph_mds_client *mdsc =
-                        ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
-                struct ceph_snap_realm *realm = ci->i_snap_realm;
-
-                dout(" dropping residual ref to snap realm %p\n", realm);
-                spin_lock(&realm->inodes_with_caps_lock);
-                list_del_init(&ci->i_snap_realm_item);
-                ci->i_snap_realm = NULL;
-                if (realm->ino == ci->i_vino.ino)
-                        realm->inode = NULL;
-                spin_unlock(&realm->inodes_with_caps_lock);
-                ceph_put_snap_realm(mdsc, realm);
+                        ceph_inode_to_client(inode)->mdsc;
+                if (ceph_snap(inode) == CEPH_NOSNAP) {
+                        struct ceph_snap_realm *realm = ci->i_snap_realm;
+                        dout(" dropping residual ref to snap realm %p\n",
+                             realm);
+                        spin_lock(&realm->inodes_with_caps_lock);
+                        list_del_init(&ci->i_snap_realm_item);
+                        ci->i_snap_realm = NULL;
+                        if (realm->ino == ci->i_vino.ino)
+                                realm->inode = NULL;
+                        spin_unlock(&realm->inodes_with_caps_lock);
+                        ceph_put_snap_realm(mdsc, realm);
+                } else {
+                        ceph_put_snapid_map(mdsc, ci->i_snapid_map);
+                        ci->i_snap_realm = NULL;
+                }
         }
 
         kfree(ci->i_symlink);
@@ -776,6 +781,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
                 pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
                                                      iinfo->pool_ns_len);
 
+        if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
+                ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
+
         spin_lock(&ci->i_ceph_lock);
 
         /*
@@ -869,6 +877,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
                         ci->i_rbytes = le64_to_cpu(info->rbytes);
                         ci->i_rfiles = le64_to_cpu(info->rfiles);
                         ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
+                        ci->i_dir_pin = iinfo->dir_pin;
                         ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
                 }
         }
@@ -899,6 +908,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
         case S_IFBLK:
         case S_IFCHR:
         case S_IFSOCK:
+                inode->i_blkbits = PAGE_SHIFT;
                 init_special_inode(inode, inode->i_mode, inode->i_rdev);
                 inode->i_op = &ceph_file_iops;
                 break;
@@ -1066,9 +1076,10 @@ static void update_dentry_lease(struct dentry *dentry,
                 goto out_unlock;
 
         di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
-
-        if (duration == 0)
+        if (duration == 0) {
+                __ceph_dentry_dir_lease_touch(di);
                 goto out_unlock;
+        }
 
         if (di->lease_gen == session->s_cap_gen &&
             time_before(ttl, di->time))
@@ -1079,8 +1090,6 @@ static void update_dentry_lease(struct dentry *dentry,
                 di->lease_session = NULL;
         }
 
-        ceph_dentry_lru_touch(dentry);
-
         if (!di->lease_session)
                 di->lease_session = ceph_get_mds_session(session);
         di->lease_gen = session->s_cap_gen;
@@ -1088,6 +1097,8 @@ static void update_dentry_lease(struct dentry *dentry,
         di->lease_renew_after = half_ttl;
         di->lease_renew_from = 0;
         di->time = ttl;
+
+        __ceph_dentry_lease_touch(di);
 out_unlock:
         spin_unlock(&dentry->d_lock);
         if (old_lease_session)
@@ -2259,10 +2270,11 @@ int ceph_getattr(const struct path *path, struct kstat *stat,
         if (!err) {
                 generic_fillattr(inode, stat);
                 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
-                if (ceph_snap(inode) != CEPH_NOSNAP)
-                        stat->dev = ceph_snap(inode);
+                if (ceph_snap(inode) == CEPH_NOSNAP)
+                        stat->dev = inode->i_sb->s_dev;
                 else
-                        stat->dev = 0;
+                        stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
+
                 if (S_ISDIR(inode->i_mode)) {
                         if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
                                                 RBYTES))
...
This diff is collapsed.
@@ -21,11 +21,14 @@
 #define CEPHFS_FEATURE_REPLY_ENCODING   9
 #define CEPHFS_FEATURE_RECLAIM_CLIENT   10
 #define CEPHFS_FEATURE_LAZY_CAP_WANTED  11
+#define CEPHFS_FEATURE_MULTI_RECONNECT  12
 
 #define CEPHFS_FEATURES_CLIENT_SUPPORTED {      \
         0, 1, 2, 3, 4, 5, 6, 7,                 \
         CEPHFS_FEATURE_MIMIC,                   \
+        CEPHFS_FEATURE_REPLY_ENCODING,          \
         CEPHFS_FEATURE_LAZY_CAP_WANTED,         \
+        CEPHFS_FEATURE_MULTI_RECONNECT,         \
 }
 #define CEPHFS_FEATURES_CLIENT_REQUIRED {}
@@ -65,6 +68,7 @@ struct ceph_mds_reply_info_in {
         char *pool_ns_data;
         u64 max_bytes;
         u64 max_files;
+        s32 dir_pin;
 };
 
 struct ceph_mds_reply_dir_entry {
@@ -152,6 +156,7 @@ struct ceph_mds_session {
         int s_mds;
         int s_state;
         unsigned long s_ttl;      /* time until mds kills us */
+        unsigned long s_features;
         u64 s_seq;                /* incoming msg seq # */
         struct mutex s_mutex;     /* serialize session messages */
@@ -167,19 +172,20 @@ struct ceph_mds_session {
         /* protected by s_cap_lock */
         spinlock_t s_cap_lock;
         struct list_head s_caps; /* all caps issued by this session */
+        struct ceph_cap *s_cap_iterator;
         int s_nr_caps, s_trim_caps;
         int s_num_cap_releases;
         int s_cap_reconnect;
         int s_readonly;
         struct list_head s_cap_releases; /* waiting cap_release messages */
-        struct ceph_cap *s_cap_iterator;
+        struct work_struct s_cap_release_work;
 
         /* protected by mutex */
         struct list_head s_cap_flushing; /* inodes w/ flushing caps */
         unsigned long s_renew_requested; /* last time we sent a renew req */
         u64 s_renew_seq;
 
         refcount_t s_ref;
         struct list_head s_waiting; /* waiting requests */
         struct list_head s_unsafe;  /* unsafe requests */
 };
@@ -310,6 +316,15 @@ struct ceph_pool_perm {
         char pool_ns[];
 };
 
+struct ceph_snapid_map {
+        struct rb_node node;
+        struct list_head lru;
+        atomic_t ref;
+        u64 snap;
+        dev_t dev;
+        unsigned long last_used;
+};
+
 /*
  * mds client state
  */
@@ -341,6 +356,7 @@ struct ceph_mds_client {
         struct rw_semaphore snap_rwsem;
         struct rb_root snap_realms;
         struct list_head snap_empty;
+        int num_snap_realms;
         spinlock_t snap_empty_lock;  /* protect snap_empty */
         u64 last_tid;                /* most recent mds request */
@@ -362,6 +378,9 @@ struct ceph_mds_client {
         spinlock_t cap_dirty_lock;   /* protects above items */
         wait_queue_head_t cap_flushing_wq;
 
+        struct work_struct cap_reclaim_work;
+        atomic_t cap_reclaim_pending;
+
         /*
          * Cap reservations
          *
@@ -378,13 +397,18 @@ struct ceph_mds_client {
                                  unreserved) */
         int caps_total_count;    /* total caps allocated */
         int caps_use_count;      /* in use */
+        int caps_use_max;        /* max used caps */
         int caps_reserve_count;  /* unused, reserved */
         int caps_avail_count;    /* unused, unreserved */
         int caps_min_count;      /* keep at least this many
                                     (unreserved) */
 
-        spinlock_t dentry_lru_lock;
-        struct list_head dentry_lru;
-        int num_dentry;
+        spinlock_t dentry_list_lock;
+        struct list_head dentry_leases;     /* fifo list */
+        struct list_head dentry_dir_leases; /* lru list */
+
+        spinlock_t snapid_map_lock;
+        struct rb_root snapid_map_tree;
+        struct list_head snapid_map_lru;
 
         struct rw_semaphore pool_perm_rwsem;
         struct rb_root pool_perm_tree;
@@ -438,9 +462,12 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
         kref_put(&req->r_kref, ceph_mdsc_release_request);
 }
 
-extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
-                                   struct ceph_mds_session *session);
+extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
+                                     struct ceph_cap *cap);
+extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+                                    struct ceph_mds_session *session);
+extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
+extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
 extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
 
 extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
...
@@ -3,12 +3,13 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
 
 #include "super.h"
 #include "mds_client.h"
 
 #include <linux/ceph/decode.h>
 
+/* unused map expires after 5 minutes */
+#define CEPH_SNAPID_MAP_TIMEOUT (5 * 60 * HZ)
+
 /*
  * Snapshots in ceph are driven in large part by cooperation from the
  * client. In contrast to local file systems or file servers that
@@ -124,6 +125,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
         INIT_LIST_HEAD(&realm->inodes_with_caps);
         spin_lock_init(&realm->inodes_with_caps_lock);
         __insert_snap_realm(&mdsc->snap_realms, realm);
+        mdsc->num_snap_realms++;
+
         dout("create_snap_realm %llx %p\n", realm->ino, realm);
         return realm;
 }
@@ -175,6 +178,7 @@ static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
         dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);
 
         rb_erase(&realm->node, &mdsc->snap_realms);
+        mdsc->num_snap_realms--;
 
         if (realm->parent) {
                 list_del_init(&realm->child_item);
@@ -986,3 +990,154 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
         up_write(&mdsc->snap_rwsem);
         return;
 }
+
+struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
+                                            u64 snap)
+{
+        struct ceph_snapid_map *sm, *exist;
+        struct rb_node **p, *parent;
+        int ret;
+
+        exist = NULL;
+        spin_lock(&mdsc->snapid_map_lock);
+        p = &mdsc->snapid_map_tree.rb_node;
+        while (*p) {
+                exist = rb_entry(*p, struct ceph_snapid_map, node);
+                if (snap > exist->snap) {
+                        p = &(*p)->rb_left;
+                } else if (snap < exist->snap) {
+                        p = &(*p)->rb_right;
+                } else {
+                        if (atomic_inc_return(&exist->ref) == 1)
+                                list_del_init(&exist->lru);
+                        break;
+                }
+                exist = NULL;
+        }
+        spin_unlock(&mdsc->snapid_map_lock);
+        if (exist) {
+                dout("found snapid map %llx -> %x\n", exist->snap, exist->dev);
+                return exist;
+        }
+
+        sm = kmalloc(sizeof(*sm), GFP_NOFS);
+        if (!sm)
+                return NULL;
+
+        ret = get_anon_bdev(&sm->dev);
+        if (ret < 0) {
+                kfree(sm);
+                return NULL;
+        }
+
+        INIT_LIST_HEAD(&sm->lru);
+        atomic_set(&sm->ref, 1);
+        sm->snap = snap;
+
+        exist = NULL;
+        parent = NULL;
+        p = &mdsc->snapid_map_tree.rb_node;
+        spin_lock(&mdsc->snapid_map_lock);
+        while (*p) {
+                parent = *p;
+                exist = rb_entry(*p, struct ceph_snapid_map, node);
+                if (snap > exist->snap)
+                        p = &(*p)->rb_left;
+                else if (snap < exist->snap)
+                        p = &(*p)->rb_right;
+                else
+                        break;
+                exist = NULL;
+        }
+        if (exist) {
+                if (atomic_inc_return(&exist->ref) == 1)
+                        list_del_init(&exist->lru);
+        } else {
+                rb_link_node(&sm->node, parent, p);
+                rb_insert_color(&sm->node, &mdsc->snapid_map_tree);
+        }
+        spin_unlock(&mdsc->snapid_map_lock);
+        if (exist) {
+                free_anon_bdev(sm->dev);
+                kfree(sm);
+                dout("found snapid map %llx -> %x\n", exist->snap, exist->dev);
+                return exist;
+        }
+
+        dout("create snapid map %llx -> %x\n", sm->snap, sm->dev);
+        return sm;
+}
+
+void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
+                         struct ceph_snapid_map *sm)
+{
+        if (!sm)
+                return;
+        if (atomic_dec_and_lock(&sm->ref, &mdsc->snapid_map_lock)) {
+                if (!RB_EMPTY_NODE(&sm->node)) {
+                        sm->last_used = jiffies;
+                        list_add_tail(&sm->lru, &mdsc->snapid_map_lru);
+                        spin_unlock(&mdsc->snapid_map_lock);
+                } else {
+                        /* already cleaned up by
+                         * ceph_cleanup_snapid_map() */
+                        spin_unlock(&mdsc->snapid_map_lock);
+                        kfree(sm);
+                }
+        }
+}
+
+void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
+{
+        struct ceph_snapid_map *sm;
+        unsigned long now;
+        LIST_HEAD(to_free);
+
+        spin_lock(&mdsc->snapid_map_lock);
+        now = jiffies;
+
+        while (!list_empty(&mdsc->snapid_map_lru)) {
+                sm = list_first_entry(&mdsc->snapid_map_lru,
+                                      struct ceph_snapid_map, lru);
+                if (time_after(sm->last_used + CEPH_SNAPID_MAP_TIMEOUT, now))
+                        break;
+
+                rb_erase(&sm->node, &mdsc->snapid_map_tree);
+                list_move(&sm->lru, &to_free);
+        }
+        spin_unlock(&mdsc->snapid_map_lock);
+
+        while (!list_empty(&to_free)) {
+                sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
+                list_del(&sm->lru);
+                dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev);
+                free_anon_bdev(sm->dev);
+                kfree(sm);
+        }
+}
+
+void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
+{
+        struct ceph_snapid_map *sm;
+        struct rb_node *p;
+        LIST_HEAD(to_free);
+
+        spin_lock(&mdsc->snapid_map_lock);
+        while ((p = rb_first(&mdsc->snapid_map_tree))) {
+                sm = rb_entry(p, struct ceph_snapid_map, node);
+                rb_erase(p, &mdsc->snapid_map_tree);
+                RB_CLEAR_NODE(p);
+                list_move(&sm->lru, &to_free);
+        }
+        spin_unlock(&mdsc->snapid_map_lock);
+
+        while (!list_empty(&to_free)) {
+                sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
+                list_del(&sm->lru);
+                free_anon_bdev(sm->dev);
+                if (WARN_ON_ONCE(atomic_read(&sm->ref))) {
+                        pr_err("snapid map %llx -> %x still in use\n",
+                               sm->snap, sm->dev);
+                }
+        }
+}
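ceph_get_snapid_map() above looks the snapid up twice because kmalloc(GFP_NOFS) and get_anon_bdev() may sleep and so cannot run under snapid_map_lock: search under the lock, drop it to allocate, then re-search before inserting and discard the copy if another thread won the race. A self-contained userspace sketch of that optimistic-insert pattern (a plain list stands in for the rb-tree; all names are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
            struct node *next;
            unsigned long long key;
            int ref;
    };

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    static struct node *lookup(unsigned long long key)
    {
            struct node *n;

            for (n = head; n; n = n->next)
                    if (n->key == key)
                            return n;
            return NULL;
    }

    struct node *get_node(unsigned long long key)
    {
            struct node *n, *exist;

            pthread_mutex_lock(&map_lock);
            exist = lookup(key);
            if (exist)
                    exist->ref++;
            pthread_mutex_unlock(&map_lock);
            if (exist)
                    return exist;

            n = calloc(1, sizeof(*n));      /* may "sleep": lock dropped */
            if (!n)
                    return NULL;
            n->key = key;
            n->ref = 1;

            pthread_mutex_lock(&map_lock);
            exist = lookup(key);            /* re-check: did we race? */
            if (exist)
                    exist->ref++;
            else {
                    n->next = head;
                    head = n;
            }
            pthread_mutex_unlock(&map_lock);

            if (exist) {
                    free(n);                /* loser discards its copy */
                    return exist;
            }
            return n;
    }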
@@ -133,6 +133,7 @@ enum {
         Opt_rasize,
         Opt_caps_wanted_delay_min,
         Opt_caps_wanted_delay_max,
+        Opt_caps_max,
         Opt_readdir_max_entries,
         Opt_readdir_max_bytes,
         Opt_congestion_kb,
@@ -175,6 +176,7 @@ static match_table_t fsopt_tokens = {
         {Opt_rasize, "rasize=%d"},
         {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
         {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
+        {Opt_caps_max, "caps_max=%d"},
         {Opt_readdir_max_entries, "readdir_max_entries=%d"},
         {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
         {Opt_congestion_kb, "write_congestion_kb=%d"},
@@ -286,6 +288,11 @@ static int parse_fsopt_token(char *c, void *private)
                         return -EINVAL;
                 fsopt->caps_wanted_delay_max = intval;
                 break;
+        case Opt_caps_max:
+                if (intval < 0)
+                        return -EINVAL;
+                fsopt->caps_max = intval;
+                break;
         case Opt_readdir_max_entries:
                 if (intval < 1)
                         return -EINVAL;
@@ -576,6 +583,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                 seq_printf(m, ",rasize=%d", fsopt->rasize);
         if (fsopt->congestion_kb != default_congestion_kb())
                 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
+        if (fsopt->caps_max)
+                seq_printf(m, ",caps_max=%d", fsopt->caps_max);
         if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
                 seq_printf(m, ",caps_wanted_delay_min=%d",
                            fsopt->caps_wanted_delay_min);
@@ -671,6 +680,9 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
         fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
         if (!fsc->trunc_wq)
                 goto fail_pg_inv_wq;
+        fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
+        if (!fsc->cap_wq)
+                goto fail_trunc_wq;
 
         /* set up mempools */
         err = -ENOMEM;
@@ -678,13 +690,12 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
         size = sizeof (struct page *) * (page_count ? page_count : 1);
         fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
         if (!fsc->wb_pagevec_pool)
-                goto fail_trunc_wq;
-
-        /* caps */
-        fsc->min_caps = fsopt->max_readdir;
+                goto fail_cap_wq;
 
         return 0;
 
+fail_cap_wq:
+        destroy_workqueue(fsc->cap_wq);
 fail_trunc_wq:
         destroy_workqueue(fsc->trunc_wq);
 fail_pg_inv_wq:
@@ -706,6 +717,7 @@ static void flush_fs_workqueues(struct ceph_fs_client *fsc)
         flush_workqueue(fsc->wb_wq);
         flush_workqueue(fsc->pg_inv_wq);
         flush_workqueue(fsc->trunc_wq);
+        flush_workqueue(fsc->cap_wq);
 }
 
 static void destroy_fs_client(struct ceph_fs_client *fsc)
@@ -715,6 +727,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
         destroy_workqueue(fsc->wb_wq);
         destroy_workqueue(fsc->pg_inv_wq);
         destroy_workqueue(fsc->trunc_wq);
+        destroy_workqueue(fsc->cap_wq);
 
         mempool_destroy(fsc->wb_pagevec_pool);
...
@@ -79,6 +79,7 @@ struct ceph_mount_options {
         int rasize;            /* max readahead */
         int congestion_kb;     /* max writeback in flight */
         int caps_wanted_delay_min, caps_wanted_delay_max;
+        int caps_max;
         int max_readdir;       /* max readdir result (entries) */
         int max_readdir_bytes; /* max readdir result (bytes) */
@@ -100,17 +101,18 @@ struct ceph_fs_client {
         struct ceph_client *client;
 
         unsigned long mount_state;
-        int min_caps;            /* min caps i added */
+
         loff_t max_file_size;
 
         struct ceph_mds_client *mdsc;
 
         /* writeback */
         mempool_t *wb_pagevec_pool;
+        atomic_long_t writeback_count;
+
         struct workqueue_struct *wb_wq;
         struct workqueue_struct *pg_inv_wq;
         struct workqueue_struct *trunc_wq;
-        atomic_long_t writeback_count;
+        struct workqueue_struct *cap_wq;
 
 #ifdef CONFIG_DEBUG_FS
         struct dentry *debugfs_dentry_lru, *debugfs_caps;
@@ -260,17 +262,22 @@ struct ceph_inode_xattr {
  * Ceph dentry state
  */
 struct ceph_dentry_info {
+        struct dentry *dentry;
         struct ceph_mds_session *lease_session;
+        struct list_head lease_list;
+        unsigned flags;
         int lease_shared_gen;
         u32 lease_gen;
         u32 lease_seq;
         unsigned long lease_renew_after, lease_renew_from;
-        struct list_head lru;
-        struct dentry *dentry;
         unsigned long time;
         u64 offset;
 };
 
+#define CEPH_DENTRY_REFERENCED  1
+#define CEPH_DENTRY_LEASE_LIST  2
+#define CEPH_DENTRY_SHRINK_LIST 4
+
 struct ceph_inode_xattrs_info {
         /*
          * (still encoded) xattr blob. we avoid the overhead of parsing
struct ceph_inode_xattrs_info { struct ceph_inode_xattrs_info {
/* /*
* (still encoded) xattr blob. we avoid the overhead of parsing * (still encoded) xattr blob. we avoid the overhead of parsing
...@@ -318,6 +325,8 @@ struct ceph_inode_info { ...@@ -318,6 +325,8 @@ struct ceph_inode_info {
/* quotas */ /* quotas */
u64 i_max_bytes, i_max_files; u64 i_max_bytes, i_max_files;
s32 i_dir_pin;
struct rb_root i_fragtree; struct rb_root i_fragtree;
int i_fragtree_nsplits; int i_fragtree_nsplits;
struct mutex i_fragtree_mutex; struct mutex i_fragtree_mutex;
@@ -370,7 +379,10 @@ struct ceph_inode_info {
         struct list_head i_unsafe_iops;   /* uncommitted mds inode ops */
         spinlock_t i_unsafe_lock;
 
-        struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
+        union {
+                struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
+                struct ceph_snapid_map *i_snapid_map; /* snapid -> dev_t */
+        };
         int i_snap_realm_counter; /* snap realm (if caps) */
         struct list_head i_snap_realm_item;
         struct list_head i_snap_flush_item;
@@ -587,7 +599,7 @@ extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                             struct ceph_inode_frag *pfrag,
                             int *found);
 
-static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
+static inline struct ceph_dentry_info *ceph_dentry(const struct dentry *dentry)
 {
         return (struct ceph_dentry_info *)dentry->d_fsdata;
 }
@@ -656,7 +668,8 @@ extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check);
 extern void ceph_caps_init(struct ceph_mds_client *mdsc);
 extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
-extern void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta);
+extern void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
+                                     struct ceph_mount_options *fsopt);
 extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
                              struct ceph_cap_reservation *ctx, int need);
 extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
@@ -837,6 +850,14 @@ extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                                   struct ceph_cap_snap *capsnap);
 extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
 
+extern struct ceph_snapid_map *ceph_get_snapid_map(struct ceph_mds_client *mdsc,
+                                                   u64 snap);
+extern void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
+                                struct ceph_snapid_map *sm);
+extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc);
+extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc);
+
 /*
  * a cap_snap is "pending" if it is still awaiting an in-progress
  * sync write (that may/may not still update size, mtime, etc.).
@@ -975,11 +996,11 @@ extern void ceph_add_cap(struct inode *inode,
                          unsigned cap, unsigned seq, u64 realmino, int flags,
                          struct ceph_cap **new_cap);
 extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+extern void __ceph_remove_caps(struct inode* inode);
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                          struct ceph_cap *cap);
 extern int ceph_is_any_caps(struct inode *inode);
-extern void ceph_queue_caps_release(struct inode *inode);
 extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
 extern int ceph_fsync(struct file *file, loff_t start, loff_t end,
                       int datasync);
@@ -1049,10 +1070,10 @@ extern int ceph_handle_snapdir(struct ceph_mds_request *req,
 extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                          struct dentry *dentry, int err);
 
-extern void ceph_dentry_lru_add(struct dentry *dn);
-extern void ceph_dentry_lru_touch(struct dentry *dn);
-extern void ceph_dentry_lru_del(struct dentry *dn);
+extern void __ceph_dentry_lease_touch(struct ceph_dentry_info *di);
+extern void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di);
 extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
+extern int ceph_trim_dentries(struct ceph_mds_client *mdsc);
 extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
 extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);
...
@@ -228,8 +228,19 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
                         ci->i_rctime.tv_nsec);
 }
 
-/* quotas */
+/* dir pin */
+static bool ceph_vxattrcb_dir_pin_exists(struct ceph_inode_info *ci)
+{
+        return ci->i_dir_pin != -ENODATA;
+}
+
+static size_t ceph_vxattrcb_dir_pin(struct ceph_inode_info *ci, char *val,
+                                    size_t size)
+{
+        return snprintf(val, size, "%d", (int)ci->i_dir_pin);
+}
 
+/* quotas */
 static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
 {
         bool ret = false;
@@ -314,6 +325,13 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
         XATTR_RSTAT_FIELD(dir, rsubdirs),
         XATTR_RSTAT_FIELD(dir, rbytes),
         XATTR_RSTAT_FIELD(dir, rctime),
+        {
+                .name = "ceph.dir.pin",
+                .name_size = sizeof("ceph.dir_pin"),
+                .getxattr_cb = ceph_vxattrcb_dir_pin,
+                .exists_cb = ceph_vxattrcb_dir_pin_exists,
+                .flags = VXATTR_FLAG_HIDDEN,
+        },
         {
                 .name = "ceph.quota",
                 .name_size = sizeof("ceph.quota"),
...
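The new vxattr is flagged VXATTR_FLAG_HIDDEN, so it does not appear in listxattr() output but can still be fetched by name; the exists_cb makes the lookup fail until the MDS reports a pin. A hypothetical userspace read (the path is an example):

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(void)
    {
            char buf[32];
            ssize_t len = getxattr("/mnt/ceph/some/dir", "ceph.dir.pin",
                                   buf, sizeof(buf) - 1);

            if (len < 0) {
                    perror("getxattr");     /* e.g. ENODATA: no pin set */
                    return 1;
            }
            buf[len] = '\0';
            printf("pinned to mds rank %s\n", buf);
            return 0;
    }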
@@ -24,6 +24,7 @@ struct ceph_vino {
 /* context for the caps reservation mechanism */
 struct ceph_cap_reservation {
         int count;
+        int used;
 };
...
@@ -495,9 +495,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
                     / sizeof(struct crush_rule_step))
                         goto bad;
 #endif
-                r = c->rules[i] = kmalloc(sizeof(*r) +
-                                          yes*sizeof(struct crush_rule_step),
-                                          GFP_NOFS);
+                r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
+                c->rules[i] = r;
                 if (r == NULL)
                         goto badmem;
                 dout(" rule %d is at %p\n", i, r);
...
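struct_size(r, steps, yes) computes sizeof(*r) plus yes elements of r->steps[] and, unlike the open-coded multiply-and-add it replaces, saturates to SIZE_MAX on overflow so that kmalloc() fails cleanly instead of returning an undersized buffer. A minimal userspace rendition of the same guard; the struct and helper here are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    struct rule {
            int len;
            int steps[];    /* flexible array member, as in crush_rule */
    };

    size_t rule_size(size_t n)
    {
            /* the overflow check struct_size() performs internally */
            if (n > (SIZE_MAX - sizeof(struct rule)) / sizeof(int))
                    return SIZE_MAX;        /* saturate: allocation fails */
            return sizeof(struct rule) + n * sizeof(int);
    }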