Commit 1a5fb64f authored by Linus Torvalds

Merge tag 'gfs2-4.12.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull GFS2 updates from Bob Peterson:
 "We've got ten GFS2 patches for this merge window.

   - Andreas Gruenbacher wrote a patch to replace the deprecated call to
     rhashtable_walk_init with rhashtable_walk_enter.

   - Andreas also wrote a patch to eliminate redundant code in two of
     our debugfs sequence files.

   - Andreas also cleaned up the rhashtable key ugliness that Linus pointed
     out during this cycle, following his suggestions (a small key-layout
     sketch follows the struct lm_lockname hunk in the diff below).

   - Andreas also wrote a patch to take advantage of his new function
     rhashtable_lookup_get_insert_fast. This makes glock lookup faster
     and more bullet-proof.

   - Andreas also wrote a patch reverting an earlier change in the evict
     path that caused occasional deadlocks and is no longer needed.

   - Andrew Price wrote a patch to re-enable fallocate for the rindex
     system file so that gfs2_grow can properly extend the file system on
     secondary grow operations.

   - I wrote a patch to initialize an inode number field to make certain
     kernel trace points more understandable.

   - I also wrote a patch that makes GFS2 file system "withdraw" work
     more like it should by ignoring operations after a withdraw that
     would formerly cause a BUG() and kernel panic.

   - I also reworked the entire truncate/delete algorithm, scrapping the
     old recursive algorithm in favor of a new non-recursive algorithm.
     This was done for performance: this way, GFS2 no longer needs to
     lock multiple resource groups while doing truncates and deletes of
     files that cross multiple resource group boundaries, allowing for
     better parallelism. It also solves a problem whereby deleting large
     files would request a large chunk of kernel memory, which resulted
     in a get_page_from_freelist warning.

   - Due to a regression found during testing, I added a new patch to
     correct 'GFS2: Prevent BUG from occurring when normal Withdraws
     occur'."

* tag 'gfs2-4.12.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  GFS2: Allow glocks to be unlocked after withdraw
  GFS2: Non-recursive delete
  gfs2: Re-enable fallocate for the rindex
  Revert "GFS2: Wait for iopen glock dequeues"
  gfs2: Switch to rhashtable_lookup_get_insert_fast
  GFS2: Temporarily zero i_no_addr when creating a dinode
  gfs2: Don't pack struct lm_lockname
  gfs2: Deduplicate gfs2_{glocks,glstats}_open
  gfs2: Replace rhashtable_walk_init with rhashtable_walk_enter
  GFS2: Prevent BUG from occurring when normal Withdraws occur
parents aeced661 ed17545d
(One diff in this merge is collapsed and not shown below.)
...
@@ -911,11 +911,15 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 {
         struct inode *inode = file_inode(file);
+        struct gfs2_sbd *sdp = GFS2_SB(inode);
         struct gfs2_inode *ip = GFS2_I(inode);
         struct gfs2_holder gh;
         int ret;
 
-        if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip))
+        if (mode & ~FALLOC_FL_KEEP_SIZE)
+                return -EOPNOTSUPP;
+        /* fallocate is needed by gfs2_grow to reserve space in the rindex */
+        if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
                 return -EOPNOTSUPP;
 
         inode_lock(inode);
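For context on the fallocate hunk above: gfs2_grow reserves space in the rindex by calling fallocate(2) with FALLOC_FL_KEEP_SIZE, which is the combination the new check lets through for the rindex inode. The user-space sketch below only illustrates that call pattern; the file path is a made-up placeholder, not the real rindex, and error handling is minimal.

    /* Minimal user-space sketch (not part of the patch): preallocate blocks
     * without changing the file size, the way gfs2_grow does for the rindex.
     * The path below is a placeholder chosen for illustration only. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/prealloc-demo", O_RDWR | O_CREAT, 0600);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Reserve 1 MiB of blocks; the file size (i_size) is left untouched. */
            if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
                    perror("fallocate");    /* e.g. EOPNOTSUPP if the fs refuses */
            close(fd);
            return 0;
    }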
...
@@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(lru_lock);
 
 static struct rhashtable_params ht_parms = {
         .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
-        .key_len = sizeof(struct lm_lockname),
+        .key_len = offsetofend(struct lm_lockname, ln_type),
         .key_offset = offsetof(struct gfs2_glock, gl_name),
         .head_offset = offsetof(struct gfs2_glock, gl_node),
 };
...
@@ -449,6 +449,9 @@ __acquires(&gl->gl_lockref.lock)
         unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
         int ret;
 
+        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
+            target != LM_ST_UNLOCKED)
+                return;
         lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                       LM_FLAG_PRIORITY);
         GLOCK_BUG_ON(gl, gl->gl_state == target);
...
@@ -484,7 +487,8 @@ __acquires(&gl->gl_lockref.lock)
                 }
                 else if (ret) {
                         pr_err("lm_lock ret %d\n", ret);
-                        GLOCK_BUG_ON(gl, 1);
+                        GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
+                                                   &sdp->sd_flags));
                 }
         } else { /* lock_nolock */
                 finish_xmote(gl, target);
...
@@ -653,10 +657,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
         struct lm_lockname name = { .ln_number = number,
                                     .ln_type = glops->go_type,
                                     .ln_sbd = sdp };
-        struct gfs2_glock *gl, *tmp = NULL;
+        struct gfs2_glock *gl, *tmp;
         struct address_space *mapping;
         struct kmem_cache *cachep;
-        int ret, tries = 0;
+        int ret = 0;
 
         rcu_read_lock();
         gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
...
@@ -721,35 +725,32 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
         }
 
 again:
-        ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
-                                            ht_parms);
-        if (ret == 0) {
+        rcu_read_lock();
+        tmp = rhashtable_lookup_get_insert_fast(&gl_hash_table, &gl->gl_node,
+                                                ht_parms);
+        if (!tmp) {
                 *glp = gl;
-                return 0;
+                goto out;
         }
-
-        if (ret == -EEXIST) {
-                ret = 0;
-                rcu_read_lock();
-                tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
-                if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
-                        if (++tries < 100) {
-                                rcu_read_unlock();
-                                cond_resched();
-                                goto again;
-                        }
-                        tmp = NULL;
-                        ret = -ENOMEM;
-                }
-                rcu_read_unlock();
-        } else {
-                WARN_ON_ONCE(ret);
+        if (IS_ERR(tmp)) {
+                ret = PTR_ERR(tmp);
+                goto out_free;
         }
+        if (lockref_get_not_dead(&tmp->gl_lockref)) {
+                *glp = tmp;
+                goto out_free;
+        }
+        rcu_read_unlock();
+        cond_resched();
+        goto again;
 
+out_free:
         kfree(gl->gl_lksb.sb_lvbptr);
         kmem_cache_free(cachep, gl);
         atomic_dec(&sdp->sd_glock_disposal);
-        *glp = tmp;
 
+out:
+        rcu_read_unlock();
         return ret;
 }
...
@@ -1918,10 +1919,10 @@ static const struct seq_operations gfs2_sbstats_seq_ops = {
 
 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
 
-static int gfs2_glocks_open(struct inode *inode, struct file *file)
+static int __gfs2_glocks_open(struct inode *inode, struct file *file,
+                              const struct seq_operations *ops)
 {
-        int ret = seq_open_private(file, &gfs2_glock_seq_ops,
-                                   sizeof(struct gfs2_glock_iter));
+        int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
         if (ret == 0) {
                 struct seq_file *seq = file->private_data;
                 struct gfs2_glock_iter *gi = seq->private;
...
@@ -1932,11 +1933,16 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
                 if (seq->buf)
                         seq->size = GFS2_SEQ_GOODSIZE;
                 gi->gl = NULL;
-                ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
+                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
         }
         return ret;
 }
 
+static int gfs2_glocks_open(struct inode *inode, struct file *file)
+{
+        return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
+}
+
 static int gfs2_glocks_release(struct inode *inode, struct file *file)
 {
         struct seq_file *seq = file->private_data;
...
@@ -1949,20 +1955,7 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
 
 static int gfs2_glstats_open(struct inode *inode, struct file *file)
 {
-        int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
-                                   sizeof(struct gfs2_glock_iter));
-        if (ret == 0) {
-                struct seq_file *seq = file->private_data;
-                struct gfs2_glock_iter *gi = seq->private;
-                gi->sdp = inode->i_private;
-                gi->last_pos = 0;
-                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
-                if (seq->buf)
-                        seq->size = GFS2_SEQ_GOODSIZE;
-                gi->gl = NULL;
-                ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
-        }
-        return ret;
+        return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
 }
 
 static int gfs2_sbstats_open(struct inode *inode, struct file *file)
...
@@ -203,11 +203,15 @@ enum {
         DFL_DLM_RECOVERY = 6,
 };
 
+/*
+ * We are using struct lm_lockname as an rhashtable key. Avoid holes within
+ * the struct; padding at the end is fine.
+ */
 struct lm_lockname {
+        struct gfs2_sbd *ln_sbd;
         u64 ln_number;
-        struct gfs2_sbd *ln_sbd;
         unsigned int ln_type;
-} __packed __aligned(sizeof(int));
+};
 
 #define lm_name_equal(name1, name2) \
         (((name1)->ln_number == (name2)->ln_number) && \
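To make the key change above concrete: with the pointer first, the u64 next and the unsigned int last, any compiler padding in struct lm_lockname can only sit at the end, and offsetofend(..., ln_type) (used for ht_parms.key_len in the glock.c hunk earlier) stops the key just before that padding, so the rhashtable can hash and memcmp() the key without the old __packed annotation. The sketch below is a self-contained user-space illustration; struct lockname_demo is a stand-in with the same field order, not the kernel definition.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same expansion as the kernel's offsetofend(): first byte past MEMBER. */
    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    /* Stand-in for struct lm_lockname with the new field order. */
    struct lockname_demo {
            void *ln_sbd;           /* pointer first ...                     */
            uint64_t ln_number;     /* ... then the 64-bit number ...        */
            unsigned int ln_type;   /* ... then the int; padding only at end */
    };

    int main(void)
    {
            /* On a typical 64-bit ABI: sizeof = 24 but key_len = 20, so the
             * four bytes of trailing padding never become part of the key. */
            printf("sizeof  = %zu\n", sizeof(struct lockname_demo));
            printf("key_len = %zu\n", offsetofend(struct lockname_demo, ln_type));
            return 0;
    }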
...
@@ -202,8 +202,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 fail_refresh:
         ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
         ip->i_iopen_gh.gh_gl->gl_object = NULL;
-        gfs2_glock_dq_wait(&ip->i_iopen_gh);
-        gfs2_holder_uninit(&ip->i_iopen_gh);
+        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_put:
         if (io_gl)
                 gfs2_glock_put(io_gl);
...
@@ -667,6 +666,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
         ip->i_height = 0;
         ip->i_depth = 0;
         ip->i_entries = 0;
+        ip->i_no_addr = 0; /* Temporarily zero until real addr is assigned */
 
         switch(mode & S_IFMT) {
         case S_IFREG:
...
@@ -483,13 +483,6 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
         }
 }
 
-static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
-{
-        u64 first = rgd->rd_data0;
-        u64 last = first + rgd->rd_data;
-        return first <= block && block < last;
-}
-
 /**
  * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
  * @sdp: The GFS2 superblock
...
@@ -83,5 +83,12 @@ static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
         return rs && !RB_EMPTY_NODE(&rs->rs_node);
 }
 
+static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
+{
+        u64 first = rgd->rd_data0;
+        u64 last = first + rgd->rd_data;
+        return first <= block && block < last;
+}
+
 extern void check_and_update_goal(struct gfs2_inode *ip);
 #endif /* __RGRP_DOT_H__ */
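The two resource-group hunks above move rgrp_contains_block() out of rgrp.c and into rgrp.h as a static inline, so the same half-open range test [rd_data0, rd_data0 + rd_data) can be reused outside rgrp.c (presumably by the truncate/delete rework whose diff is collapsed above). Below is a self-contained sketch of that boundary behaviour, using a stand-in struct rather than the real struct gfs2_rgrpd.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the two struct gfs2_rgrpd fields the helper reads. */
    struct rgrpd_demo {
            uint64_t rd_data0;      /* first data block in the resource group */
            uint64_t rd_data;       /* number of data blocks in the group */
    };

    /* Same test as the static inline now exported from rgrp.h. */
    static inline int rgrp_contains_block(const struct rgrpd_demo *rgd, uint64_t block)
    {
            uint64_t first = rgd->rd_data0;
            uint64_t last = first + rgd->rd_data;

            return first <= block && block < last;
    }

    int main(void)
    {
            struct rgrpd_demo rgd = { .rd_data0 = 1000, .rd_data = 500 };

            printf("%d\n", rgrp_contains_block(&rgd, 1000));  /* 1: first block is in range */
            printf("%d\n", rgrp_contains_block(&rgd, 1499));  /* 1: last block in range */
            printf("%d\n", rgrp_contains_block(&rgd, 1500));  /* 0: one past the end */
            return 0;
    }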
...
@@ -793,7 +793,8 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
 
         if (!(flags & (I_DIRTY_DATASYNC|I_DIRTY_SYNC)))
                 return;
-
+        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+                return;
         if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
                 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
                 if (ret) {
...
@@ -1538,8 +1539,7 @@ static void gfs2_evict_inode(struct inode *inode)
         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
         if (unlikely(error)) {
                 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-                gfs2_glock_dq_wait(&ip->i_iopen_gh);
-                gfs2_holder_uninit(&ip->i_iopen_gh);
+                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                 goto out;
         }
 
...
@@ -1617,7 +1617,7 @@ static void gfs2_evict_inode(struct inode *inode)
         if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
                 if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
                         ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-                        gfs2_glock_dq_wait(&ip->i_iopen_gh);
+                        gfs2_glock_dq(&ip->i_iopen_gh);
                 }
                 gfs2_holder_uninit(&ip->i_iopen_gh);
         }
...
@@ -1639,8 +1639,7 @@ static void gfs2_evict_inode(struct inode *inode)
         if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
                 ip->i_iopen_gh.gh_gl->gl_object = NULL;
                 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-                gfs2_glock_dq_wait(&ip->i_iopen_gh);
-                gfs2_holder_uninit(&ip->i_iopen_gh);
+                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
         }
 }
 
...