Commit 08242bc2 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw

Pull GFS2 updates from Steven Whitehouse:
 "The main feature this time is the new Orlov allocator and the patches
  leading up to it which allow us to allocate new inodes from their own
  allocation context, rather than borrowing that of their parent
  directory.  It is this change which then allows us to choose a
  different location for subdirectories when required.  This works
  exactly as per the ext3 implementation from the user's point of view.

  In addition to that, we've got a speed-up in gfs2_rbm_from_block()
  from Bob Peterson, three locking-related improvements from Dave
  Teigland, plus a selection of smaller bug fixes and clean-ups."

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw:
  GFS2: Set gl_object during inode create
  GFS2: add error check while allocating new inodes
  GFS2: don't reference inode's glock during block allocation trace
  GFS2: remove redundant lvb pointer
  GFS2: only use lvb on glocks that need it
  GFS2: skip dlm_unlock calls in unmount
  GFS2: Fix one RG corner case
  GFS2: Eliminate redundant buffer_head manipulation in gfs2_unlink_inode
  GFS2: Use dirty_inode in gfs2_dir_add
  GFS2: Fix truncation of journaled data files
  GFS2: Add Orlov allocator
  GFS2: Use proper allocation context for new inodes
  GFS2: Add test for resource group congestion status
  GFS2: Rename glops go_xmote_th to go_sync
  GFS2: Speed up gfs2_rbm_from_block
  GFS2: Review bug traps in glops.c
parents be354f40 1e2d9d44
@@ -643,7 +643,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 		goto out_unlock;
 
 	requested = data_blocks + ind_blocks;
-	error = gfs2_inplace_reserve(ip, requested);
+	error = gfs2_inplace_reserve(ip, requested, 0);
 	if (error)
 		goto out_qunlock;
 }
...
@@ -991,6 +991,41 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
 	return err;
 }
 
+/**
+ * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
+ * @inode: The inode being truncated
+ * @oldsize: The original (larger) size
+ * @newsize: The new smaller size
+ *
+ * With jdata files, we have to journal a revoke for each block which is
+ * truncated. As a result, we need to split this into separate transactions
+ * if the number of pages being truncated gets too large.
+ */
+
+#define GFS2_JTRUNC_REVOKES 8192
+
+static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
+{
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
+	u64 chunk;
+	int error;
+
+	while (oldsize != newsize) {
+		chunk = oldsize - newsize;
+		if (chunk > max_chunk)
+			chunk = max_chunk;
+		truncate_pagecache(inode, oldsize, oldsize - chunk);
+		oldsize -= chunk;
+		gfs2_trans_end(sdp);
+		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
+		if (error)
+			return error;
+	}
+
+	return 0;
+}
+
 static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
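(For scale, assuming a 4 KiB block size: max_chunk in the new gfs2_journaled_truncate() above works out to 8192 * 4096 bytes = 32 MiB, so truncating away 1 GiB of journaled data is split into 32 separate transactions, each with its own quota of revokes.)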
@@ -1000,8 +1035,10 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
 	int journaled = gfs2_is_jdata(ip);
 	int error;
 
-	error = gfs2_trans_begin(sdp,
-				 RES_DINODE + (journaled ? RES_JDATA : 0), 0);
+	if (journaled)
+		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
+	else
+		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 	if (error)
 		return error;
@@ -1026,7 +1063,16 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 	gfs2_dinode_out(ip, dibh->b_data);
 
-	truncate_pagecache(inode, oldsize, newsize);
+	if (journaled)
+		error = gfs2_journaled_truncate(inode, oldsize, newsize);
+	else
+		truncate_pagecache(inode, oldsize, newsize);
+
+	if (error) {
+		brelse(dibh);
+		return error;
+	}
 
 out_brelse:
 	brelse(dibh);
 out:
@@ -1178,7 +1224,7 @@ static int do_grow(struct inode *inode, u64 size)
 		if (error)
 			return error;
 
-		error = gfs2_inplace_reserve(ip, 1);
+		error = gfs2_inplace_reserve(ip, 1, 0);
 		if (error)
 			goto do_grow_qunlock;
 		unstuff = 1;
...
@@ -1676,16 +1676,11 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
 			be16_add_cpu(&leaf->lf_entries, 1);
 		}
 		brelse(bh);
-		error = gfs2_meta_inode_buffer(ip, &bh);
-		if (error)
-			break;
-		gfs2_trans_add_bh(ip->i_gl, bh, 1);
 		ip->i_entries++;
 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 		if (S_ISDIR(nip->i_inode.i_mode))
 			inc_nlink(&ip->i_inode);
-		gfs2_dinode_out(ip, bh->b_data);
-		brelse(bh);
+		mark_inode_dirty(inode);
 		error = 0;
 		break;
 	}
...
@@ -432,7 +432,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ret)
 		goto out_unlock;
 
 	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
-	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
+	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
 	if (ret)
 		goto out_quota_unlock;
@@ -825,7 +825,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 retry:
 	gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
 
-	error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
+	error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
 	if (error) {
 		if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
 			bytes >>= 1;
...
@@ -55,8 +55,6 @@ struct gfs2_glock_iter {
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
-static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
-#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
 
 static struct dentry *gfs2_root;
@@ -107,10 +105,12 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
 {
 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 
-	if (gl->gl_ops->go_flags & GLOF_ASPACE)
+	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
-	else
+	} else {
+		kfree(gl->gl_lksb.sb_lvbptr);
 		kmem_cache_free(gfs2_glock_cachep, gl);
+	}
 }
 
 void gfs2_glock_free(struct gfs2_glock *gl)
@@ -537,8 +537,8 @@ __acquires(&gl->gl_spin)
 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
 	spin_unlock(&gl->gl_spin);
-	if (glops->go_xmote_th)
-		glops->go_xmote_th(gl);
+	if (glops->go_sync)
+		glops->go_sync(gl);
 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
@@ -547,7 +547,10 @@ __acquires(&gl->gl_spin)
 	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
 		/* lock_dlm */
 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
-		GLOCK_BUG_ON(gl, ret);
+		if (ret) {
+			printk(KERN_ERR "GFS2: lm_lock ret %d\n", ret);
+			GLOCK_BUG_ON(gl, 1);
+		}
 	} else { /* lock_nolock */
 		finish_xmote(gl, target);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -736,6 +739,16 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (!gl)
 		return -ENOMEM;
 
+	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+
+	if (glops->go_flags & GLOF_LVB) {
+		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+		if (!gl->gl_lksb.sb_lvbptr) {
+			kmem_cache_free(cachep, gl);
+			return -ENOMEM;
+		}
+	}
+
 	atomic_inc(&sdp->sd_glock_disposal);
 	gl->gl_sbd = sdp;
 	gl->gl_flags = 0;
@@ -753,9 +766,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	preempt_enable();
 	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
 	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
-	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
-	memset(gl->gl_lvb, 0, 32 * sizeof(char));
-	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
@@ -777,6 +787,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	tmp = search_bucket(hash, sdp, &name);
 	if (tmp) {
 		spin_unlock_bucket(hash);
+		kfree(gl->gl_lksb.sb_lvbptr);
 		kmem_cache_free(cachep, gl);
 		atomic_dec(&sdp->sd_glock_disposal);
 		gl = tmp;
@@ -1013,7 +1024,7 @@ __acquires(&gl->gl_spin)
 	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
 	printk(KERN_ERR "lock type: %d req lock state : %d\n",
 	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
-	__dump_glock(NULL, gl);
+	gfs2_dump_glock(NULL, gl);
 	BUG();
 }
@@ -1508,7 +1519,7 @@ static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
 {
 	int ret;
 
 	spin_lock(&gl->gl_spin);
-	ret = __dump_glock(seq, gl);
+	ret = gfs2_dump_glock(seq, gl);
 	spin_unlock(&gl->gl_spin);
 	return ret;
 }
@@ -1528,6 +1539,7 @@ static void dump_glock_func(struct gfs2_glock *gl)
 
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
+	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
 	glock_hash_walk(clear_glock, sdp);
 	flush_workqueue(glock_workqueue);
 	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
@@ -1655,7 +1667,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 }
 
 /**
- * __dump_glock - print information about a glock
+ * gfs2_dump_glock - print information about a glock
  * @seq: The seq_file struct
  * @gl: the glock
  *
@@ -1672,7 +1684,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
  * Returns: 0 on success, -ENOBUFS when we run out of space
  */
 
-static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
+int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	unsigned long long dtime;
...
@@ -178,33 +178,33 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 	return NULL;
 }
 
-int gfs2_glock_get(struct gfs2_sbd *sdp,
-		   u64 number, const struct gfs2_glock_operations *glops,
-		   int create, struct gfs2_glock **glp);
-void gfs2_glock_hold(struct gfs2_glock *gl);
-void gfs2_glock_put_nolock(struct gfs2_glock *gl);
-void gfs2_glock_put(struct gfs2_glock *gl);
-void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
-		      struct gfs2_holder *gh);
-void gfs2_holder_reinit(unsigned int state, unsigned flags,
-			struct gfs2_holder *gh);
-void gfs2_holder_uninit(struct gfs2_holder *gh);
-int gfs2_glock_nq(struct gfs2_holder *gh);
-int gfs2_glock_poll(struct gfs2_holder *gh);
-int gfs2_glock_wait(struct gfs2_holder *gh);
-void gfs2_glock_dq(struct gfs2_holder *gh);
-void gfs2_glock_dq_wait(struct gfs2_holder *gh);
-
-void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
-int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
-		      u64 number, const struct gfs2_glock_operations *glops,
-		      unsigned int state, int flags, struct gfs2_holder *gh);
-
-int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
-
-__printf(2, 3)
+extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+			  const struct gfs2_glock_operations *glops,
+			  int create, struct gfs2_glock **glp);
+extern void gfs2_glock_hold(struct gfs2_glock *gl);
+extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
+extern void gfs2_glock_put(struct gfs2_glock *gl);
+extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
+			     unsigned flags, struct gfs2_holder *gh);
+extern void gfs2_holder_reinit(unsigned int state, unsigned flags,
+			       struct gfs2_holder *gh);
+extern void gfs2_holder_uninit(struct gfs2_holder *gh);
+extern int gfs2_glock_nq(struct gfs2_holder *gh);
+extern int gfs2_glock_poll(struct gfs2_holder *gh);
+extern int gfs2_glock_wait(struct gfs2_holder *gh);
+extern void gfs2_glock_dq(struct gfs2_holder *gh);
+extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
+extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
+extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
+			     const struct gfs2_glock_operations *glops,
+			     unsigned int state, int flags,
+			     struct gfs2_holder *gh);
+extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+extern void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
+extern int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
+#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { gfs2_dump_glock(NULL, gl); BUG(); } } while(0)
+extern __printf(2, 3)
 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
 
 /**
...
@@ -74,7 +74,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 		gfs2_trans_add_revoke(sdp, bd);
 	}
 
-	BUG_ON(!fsync && atomic_read(&gl->gl_ail_count));
+	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
 	spin_unlock(&sdp->sd_ail_lock);
 	gfs2_log_unlock(sdp);
 }
@@ -96,7 +96,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
 	tr.tr_ip = (unsigned long)__builtin_return_address(0);
 	sb_start_intwrite(sdp->sd_vfs);
 	gfs2_log_reserve(sdp, tr.tr_reserved);
-	BUG_ON(current->journal_info);
+	WARN_ON_ONCE(current->journal_info);
 	current->journal_info = &tr;
 
 	__gfs2_ail_flush(gl, 0);
@@ -139,7 +139,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
 		return;
-	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
+	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
 	gfs2_log_flush(gl->gl_sbd, gl);
 	filemap_fdatawrite(metamapping);
@@ -168,7 +168,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
 	struct address_space *mapping = gfs2_glock2aspace(gl);
 
-	BUG_ON(!(flags & DIO_METADATA));
+	WARN_ON_ONCE(!(flags & DIO_METADATA));
 	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
 	truncate_inode_pages(mapping, 0);
@@ -197,7 +197,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
 		return;
 
-	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
+	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
 	gfs2_log_flush(gl->gl_sbd, gl);
 	filemap_fdatawrite(metamapping);
@@ -536,7 +536,7 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
-	.go_xmote_th = inode_go_sync,
+	.go_sync = inode_go_sync,
 	.go_inval = inode_go_inval,
 	.go_demote_ok = inode_go_demote_ok,
 	.go_lock = inode_go_lock,
@@ -546,17 +546,17 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
 };
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
-	.go_xmote_th = rgrp_go_sync,
+	.go_sync = rgrp_go_sync,
 	.go_inval = rgrp_go_inval,
 	.go_lock = gfs2_rgrp_go_lock,
 	.go_unlock = gfs2_rgrp_go_unlock,
 	.go_dump = gfs2_rgrp_dump,
 	.go_type = LM_TYPE_RGRP,
-	.go_flags = GLOF_ASPACE,
+	.go_flags = GLOF_ASPACE | GLOF_LVB,
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {
-	.go_xmote_th = trans_go_sync,
+	.go_sync = trans_go_sync,
 	.go_xmote_bh = trans_go_xmote_bh,
 	.go_demote_ok = trans_go_demote_ok,
 	.go_type = LM_TYPE_NONDISK,
@@ -577,6 +577,7 @@ const struct gfs2_glock_operations gfs2_nondisk_glops = {
 
 const struct gfs2_glock_operations gfs2_quota_glops = {
 	.go_type = LM_TYPE_QUOTA,
+	.go_flags = GLOF_LVB,
 };
 
 const struct gfs2_glock_operations gfs2_journal_glops = {
...
@@ -205,7 +205,7 @@ struct lm_lockname {
 
 struct gfs2_glock_operations {
-	void (*go_xmote_th) (struct gfs2_glock *gl);
+	void (*go_sync) (struct gfs2_glock *gl);
 	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
 	void (*go_inval) (struct gfs2_glock *gl, int flags);
 	int (*go_demote_ok) (const struct gfs2_glock *gl);
@@ -216,6 +216,7 @@ struct gfs2_glock_operations {
 	const int go_type;
 	const unsigned long go_flags;
 #define GLOF_ASPACE 1
+#define GLOF_LVB 2
 };
 
 enum {
@@ -321,7 +322,6 @@ struct gfs2_glock {
 	ktime_t gl_dstamp;
 	struct gfs2_lkstats gl_stats;
 	struct dlm_lksb gl_lksb;
-	char gl_lvb[32];
 	unsigned long gl_tchange;
 	void *gl_object;
@@ -539,6 +539,7 @@ enum {
 	SDF_DEMOTE = 5,
 	SDF_NOJOURNALID = 6,
 	SDF_RORECOVERY = 7, /* read only recovery */
+	SDF_SKIP_DLM_UNLOCK = 8,
 };
 
 #define GFS2_FSNAME_LEN 256
@@ -621,6 +622,7 @@ struct gfs2_sbd {
 	u32 sd_hash_bsize_shift;
 	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
 	u32 sd_qc_per_block;
+	u32 sd_blocks_per_bitmap;
 	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
 	u32 sd_max_height;	/* Max height of a file's metadata tree */
 	u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
...
(One further file's diff is collapsed in the web view and not shown here.)
@@ -120,8 +120,8 @@ static void gdlm_ast(void *arg)
 	gfs2_update_reply_times(gl);
 	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
 
-	if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID)
-		memset(gl->gl_lvb, 0, GDLM_LVB_SIZE);
+	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
+		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
 
 	switch (gl->gl_lksb.sb_status) {
 	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
@@ -203,8 +203,10 @@ static int make_mode(const unsigned int lmstate)
 static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
 		      const int req)
 {
-	u32 lkf = DLM_LKF_VALBLK;
-	u32 lkid = gl->gl_lksb.sb_lkid;
+	u32 lkf = 0;
+
+	if (gl->gl_lksb.sb_lvbptr)
+		lkf |= DLM_LKF_VALBLK;
 
 	if (gfs_flags & LM_FLAG_TRY)
 		lkf |= DLM_LKF_NOQUEUE;
@@ -228,7 +230,7 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
 		BUG();
 	}
 
-	if (lkid != 0) {
+	if (gl->gl_lksb.sb_lkid != 0) {
 		lkf |= DLM_LKF_CONVERT;
 		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
 			lkf |= DLM_LKF_QUECVT;
@@ -289,6 +291,14 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
 	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 	gfs2_update_request_times(gl);
+
+	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
+	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+	    gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) {
+		gfs2_glock_free(gl);
+		return;
+	}
+
 	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
 			   NULL, gl);
 	if (error) {
...
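(Note on the hunk above: SDF_SKIP_DLM_UNLOCK is set by gfs2_gl_hash_clear() at unmount time, so glocks being torn down can be freed without a dlm_unlock round trip to the lock manager. The exception is a lock still held in EX with an LVB attached, since releasing it via dlm_unlock is what writes the LVB back for other nodes to see.)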
@@ -278,6 +278,9 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
 	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
 				sizeof(struct gfs2_meta_header)) /
 				sizeof(struct gfs2_quota_change);
+	sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
+				     sizeof(struct gfs2_meta_header))
+				     * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
 
 	/* Compute maximum reservation required to add a entry to a directory */
...
@@ -816,7 +816,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 
 	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
-	error = gfs2_inplace_reserve(ip, reserved);
+	error = gfs2_inplace_reserve(ip, reserved, 0);
 	if (error)
 		goto out_alloc;
@@ -869,7 +869,7 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 	if (error < 0)
 		return error;
 
-	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 	qlvb->__pad = 0;
 	qlvb->qb_limit = q.qu_limit;
@@ -893,7 +893,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 	if (error)
 		return error;
 
-	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 
 	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
 		gfs2_glock_dq_uninit(q_gh);
@@ -1506,7 +1506,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
 	if (error)
 		goto out;
 
-	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
 	fdq->d_version = FS_DQUOT_VERSION;
 	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
 	fdq->d_id = from_kqid(&init_user_ns, qid);
@@ -1605,7 +1605,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
 		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
 				       &data_blocks, &ind_blocks);
 		blocks = 1 + data_blocks + ind_blocks;
-		error = gfs2_inplace_reserve(ip, blocks);
+		error = gfs2_inplace_reserve(ip, blocks, 0);
 		if (error)
 			goto out_i;
 		blocks += gfs2_rg_blocks(ip, blocks);
...
@@ -16,6 +16,7 @@
 #include <linux/prefetch.h>
 #include <linux/blkdev.h>
 #include <linux/rbtree.h>
+#include <linux/random.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -251,22 +252,25 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 {
 	u64 rblock = block - rbm->rgd->rd_data0;
-	u32 goal = (u32)rblock;
-	int x;
+	u32 x;
 
 	if (WARN_ON_ONCE(rblock > UINT_MAX))
 		return -EINVAL;
 	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
 		return -E2BIG;
 
-	for (x = 0; x < rbm->rgd->rd_length; x++) {
-		rbm->bi = rbm->rgd->rd_bits + x;
-		if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
-			rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
-			break;
-		}
-	}
+	rbm->bi = rbm->rgd->rd_bits;
+	rbm->offset = (u32)(rblock);
+	/* Check if the block is within the first block */
+	if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
+		return 0;
+
+	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
+	rbm->offset += (sizeof(struct gfs2_rgrp) -
+			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
+	x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+	rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+	rbm->bi += x;
 
 	return 0;
 }
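(A purely illustrative walk through the new O(1) lookup, assuming a 4 KiB block size with sizeof(struct gfs2_meta_header) == 24, sizeof(struct gfs2_rgrp) == 128 and GFS2_NBBY == 4 blocks per bitmap byte: sd_blocks_per_bitmap = (4096 - 24) * 4 = 16288. A relative block of 40000 lies beyond the first bitmap, so the offset becomes 40000 + (128 - 24) * 4 = 40416, giving x = 40416 / 16288 = 2; the result is bitmap block 2 at offset 40416 - 2 * 16288 = 7840, with no per-bitmap loop over rd_length required.)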
@@ -875,7 +879,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 		goto fail;
 
 	rgd->rd_gl->gl_object = rgd;
-	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
+	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
 	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
 	if (rgd->rd_data > sdp->sd_max_rg_data)
 		sdp->sd_max_rg_data = rgd->rd_data;
@@ -1678,13 +1682,105 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
 	return;
 }
 
+/**
+ * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
+ * @rgd: The rgrp in question
+ * @loops: An indication of how picky we can be (0=very, 1=less so)
+ *
+ * This function uses the recently added glock statistics in order to
+ * figure out whether a particular resource group is suffering from
+ * contention from multiple nodes. This is done purely on the basis
+ * of timings, since this is the only data we have to work with and
+ * our aim here is to reject a resource group which is highly contended
+ * but (very important) not to do this too often in order to ensure that
+ * we do not land up introducing fragmentation by changing resource
+ * groups when not actually required.
+ *
+ * The calculation is fairly simple, we want to know whether the SRTTB
+ * (i.e. smoothed round trip time for blocking operations) to acquire
+ * the lock for this rgrp's glock is significantly greater than the
+ * time taken for resource groups on average. We introduce a margin in
+ * the form of the variable @var which is computed as the sum of the two
+ * respective variances, and multiplied by a factor depending on @loops
+ * and whether we have a lot of data to base the decision on. This is
+ * then tested against the square difference of the means in order to
+ * decide whether the result is statistically significant or not.
+ *
+ * Returns: A boolean verdict on the congestion status
+ */
+
+static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
+{
+	const struct gfs2_glock *gl = rgd->rd_gl;
+	const struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_lkstats *st;
+	s64 r_dcount, l_dcount;
+	s64 r_srttb, l_srttb;
+	s64 srttb_diff;
+	s64 sqr_diff;
+	s64 var;
+
+	preempt_disable();
+	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
+	r_srttb = st->stats[GFS2_LKS_SRTTB];
+	r_dcount = st->stats[GFS2_LKS_DCOUNT];
+	var = st->stats[GFS2_LKS_SRTTVARB] +
+	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
+	preempt_enable();
+
+	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
+	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
+
+	if ((l_dcount < 1) || (r_dcount < 1) || (r_srttb == 0))
+		return false;
+
+	srttb_diff = r_srttb - l_srttb;
+	sqr_diff = srttb_diff * srttb_diff;
+
+	var *= 2;
+	if (l_dcount < 8 || r_dcount < 8)
+		var *= 2;
+	if (loops == 1)
+		var *= 2;
+
+	return ((srttb_diff < 0) && (sqr_diff > var));
+}
+
+/**
+ * gfs2_rgrp_used_recently
+ * @rs: The block reservation with the rgrp to test
+ * @msecs: The time limit in milliseconds
+ *
+ * Returns: True if the rgrp glock has been used within the time limit
+ */
+static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
+				    u64 msecs)
+{
+	u64 tdiff;
+
+	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
+				      rs->rs_rbm.rgd->rd_gl->gl_dstamp));
+
+	return tdiff > (msecs * 1000 * 1000);
+}
+
+static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
+{
+	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	u32 skip;
+
+	get_random_bytes(&skip, sizeof(skip));
+	return skip % sdp->sd_rgrps;
+}
+
 static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
 {
 	struct gfs2_rgrpd *rgd = *pos;
+	struct gfs2_sbd *sdp = rgd->rd_sbd;
 
 	rgd = gfs2_rgrpd_get_next(rgd);
 	if (rgd == NULL)
-		rgd = gfs2_rgrpd_get_next(NULL);
+		rgd = gfs2_rgrpd_get_first(sdp);
 	*pos = rgd;
 	if (rgd != begin) /* If we didn't wrap */
 		return true;
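(To make the gfs2_rgrp_congested() test above concrete, a purely illustrative run of the numbers: suppose the per-cpu average for rgrp glocks gives r_srttb = 100 while this glock's own l_srttb = 300, both with dcounts >= 8 and on the first pass, loops == 0. Then srttb_diff = -200 and sqr_diff = 40000; if the two variances sum to 5000, var is doubled once to 10000, and since srttb_diff < 0 and 40000 > 10000, the rgrp is judged congested and the allocator moves on to the next one.)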
@@ -1699,14 +1795,15 @@ static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
 * Returns: errno
 */
 
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrpd *begin = NULL;
 	struct gfs2_blkreserv *rs = ip->i_res;
-	int error = 0, rg_locked, flags = LM_FLAG_TRY;
+	int error = 0, rg_locked, flags = 0;
 	u64 last_unlinked = NO_BLOCK;
 	int loops = 0;
+	u32 skip = 0;
 
 	if (sdp->sd_args.ar_rgrplvb)
 		flags |= GL_SKIP;
@@ -1720,6 +1817,8 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
 	} else {
 		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
 	}
+	if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
+		skip = gfs2_orlov_skip(ip);
 	if (rs->rs_rbm.rgd == NULL)
 		return -EBADSLT;
@@ -1728,13 +1827,20 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
 		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
 			rg_locked = 0;
+			if (skip && skip--)
+				goto next_rgrp;
+			if (!gfs2_rs_active(rs) && (loops < 2) &&
+			    gfs2_rgrp_used_recently(rs, 1000) &&
+			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
+				goto next_rgrp;
 			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
 						   LM_ST_EXCLUSIVE, flags,
 						   &rs->rs_rgd_gh);
-			if (error == GLR_TRYFAILED)
-				goto next_rgrp;
 			if (unlikely(error))
 				return error;
+			if (!gfs2_rs_active(rs) && (loops < 2) &&
+			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
+				goto skip_rgrp;
 			if (sdp->sd_args.ar_rgrplvb) {
 				error = update_rgrp_lvb(rs->rs_rbm.rgd);
 				if (unlikely(error)) {
@@ -1781,12 +1887,13 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
 		/* Find the next rgrp, and continue looking */
 		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
 			continue;
+		if (skip)
+			continue;
 
 		/* If we've scanned all the rgrps, but found no free blocks
 		 * then this checks for some less likely conditions before
 		 * trying again.
 		 */
-		flags &= ~LM_FLAG_TRY;
 		loops++;
 		/* Check that fs hasn't grown if writing to rindex */
 		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
...
@@ -39,7 +39,8 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
 
 extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
 
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested);
+#define GFS2_AF_ORLOV 1
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 flags);
 extern void gfs2_inplace_release(struct gfs2_inode *ip);
 
 extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
...
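With the new flags argument in place, a caller that wants Orlov-style spreading passes GFS2_AF_ORLOV when reserving blocks for a new inode, while every call site touched in this series passes 0. A minimal, hypothetical sketch of such a call site follows (the real one lives in the inode creation path, which is not part of this diff; dip, mode, blocks and the error label are stand-ins):

	/* Hypothetical caller: let the allocator pick a randomised starting
	 * rgrp for new directories so that subtrees spread across the fs;
	 * everything else keeps the default goal-based placement. */
	u32 aflags = S_ISDIR(mode) ? GFS2_AF_ORLOV : 0;

	error = gfs2_inplace_reserve(dip, blocks, aflags);
	if (error)
		goto out_quota_unlock;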
@@ -486,7 +486,7 @@ TRACE_EVENT(gfs2_block_alloc,
 	),
 
 	TP_fast_assign(
-		__entry->dev		= ip->i_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->dev		= rgd->rd_gl->gl_sbd->sd_vfs->s_dev;
 		__entry->start		= block;
 		__entry->inum		= ip->i_no_addr;
 		__entry->len		= len;
...
@@ -734,7 +734,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 	if (error)
 		return error;
 
-	error = gfs2_inplace_reserve(ip, blks);
+	error = gfs2_inplace_reserve(ip, blks, 0);
 	if (error)
 		goto out_gunlock_q;
...