Commit 9d74288c authored by Linus Torvalds

Merge tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Bob Peterson:
 "Here is a list of patches we've accumulated for GFS2 for the current
  upstream merge window.  There are only six patches this time:

   1. A cleanup patch from Andreas to remove the gl_spin #define in favor
      of its value for the sake of clarity.
   2. A fix from Andy Price to mark the inode dirty during fallocate.
   3. A fix from Andy Price to set s_mode on mount failures to prevent a
      stack trace.
   4. A patch from me to prevent a kernel BUG() in trans_add_meta/trans_add_data
      due to uninitialized storage.
   5. A patch from me to protect our freeing of the in-core directory
      hash table to prevent a double-free.
   6. A fix for a page/block rounding problem that resulted in a metadata
      coherency problem when the block size != page size.

  I've got a lot more patches in various stages of review and testing,
  but I'm afraid they'll have to wait until the next merge window.  So
  next time we're likely to have a lot more."

* tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  GFS2: Fix rgrp end rounding problem for bsize < page size
  GFS2: Protect freeing directory hash table with i_lock spin_lock
  gfs2: Remove gl_spin define
  gfs2: Add missing else in trans_add_meta/data
  GFS2: Set s_mode before parsing mount options
  GFS2: fallocate: do not rely on file_update_time to mark the inode dirty
parents 123a28d8 31dddd9e
Documentation/filesystems/gfs2-glocks.txt
@@ -5,7 +5,7 @@ This documents the basic principles of the glock state machine
 internals. Each glock (struct gfs2_glock in fs/gfs2/incore.h)
 has two main (internal) locks:

- 1. A spinlock (gl_spin) which protects the internal state such
+ 1. A spinlock (gl_lockref.lock) which protects the internal state such
     as gl_state, gl_target and the list of holders (gl_holders)
  2. A non-blocking bit lock, GLF_LOCK, which is used to prevent other
     threads from making calls to the DLM, etc. at the same time. If a
@@ -82,8 +82,8 @@ rather than via the glock.

 Locking rules for glock operations:

-Operation     |  GLF_LOCK bit lock held |  gl_spin spinlock held
------------------------------------------------------------------
+Operation     |  GLF_LOCK bit lock held |  gl_lockref.lock spinlock held
+-------------------------------------------------------------------------
 go_xmote_th   |       Yes               |       No
 go_xmote_bh   |       Yes               |       No
 go_inval      |       Yes               |       No
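The rename documented above is mechanical: gl_spin was only ever a #define alias for gl_lockref.lock (see the fs/gfs2/incore.h hunk further down), so spelling out the real field makes it obvious that the one spinlock embedded in the glock's lockref protects both the reference count and the state fields in the table. A minimal userspace sketch of that arrangement, with illustrative names and a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>

struct lockref {
        pthread_mutex_t lock;   /* stands in for the kernel's spinlock_t */
        unsigned int count;
};

struct glock {
        struct lockref lockref;
        int state;              /* protected by lockref.lock */
};

/* Take a reference and change state under the one shared lock. */
static void glock_hold_and_set_state(struct glock *gl, int new_state)
{
        pthread_mutex_lock(&gl->lockref.lock);
        gl->lockref.count++;    /* refcount update ... */
        gl->state = new_state;  /* ... and state update, same lock */
        pthread_mutex_unlock(&gl->lockref.lock);
}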
fs/gfs2/dir.c
@@ -388,8 +388,13 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
  */
 void gfs2_dir_hash_inval(struct gfs2_inode *ip)
 {
-        __be64 *hc = ip->i_hash_cache;
+        __be64 *hc;
+
+        spin_lock(&ip->i_inode.i_lock);
+        hc = ip->i_hash_cache;
         ip->i_hash_cache = NULL;
+        spin_unlock(&ip->i_inode.i_lock);
+
         kvfree(hc);
 }
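The hunk above closes a double-free: two racing callers could both load the same i_hash_cache pointer and both kvfree() it. Detaching the pointer and clearing the field under i_lock, then freeing outside the lock, means exactly one caller ever owns the pointer. A self-contained userspace sketch of the same idiom (hypothetical names; a pthread mutex stands in for i_lock):

#include <pthread.h>
#include <stdlib.h>

struct dir_cache {
        pthread_mutex_t lock;           /* plays the role of i_lock */
        void *hash_table;               /* plays the role of i_hash_cache */
};

static void dir_hash_inval(struct dir_cache *dc)
{
        void *hc;

        pthread_mutex_lock(&dc->lock);
        hc = dc->hash_table;            /* take ownership of the pointer */
        dc->hash_table = NULL;          /* racing callers now read NULL */
        pthread_mutex_unlock(&dc->lock);

        free(hc);                       /* free(NULL) is a no-op, so the
                                           loser of the race frees nothing */
}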
fs/gfs2/file.c
@@ -897,8 +897,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
         if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
                 i_size_write(inode, pos + count);
-                /* Marks the inode as dirty */
                 file_update_time(file);
+                mark_inode_dirty(inode);
         }

         return generic_write_sync(file, pos, count);
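The fix above works because file_update_time() only marks the inode dirty when it actually advances a timestamp; a second fallocate within the same timestamp granule could therefore grow i_size without the inode ever being queued for writeback. A toy model of the hazard (simplified assumptions, not the real VFS behavior):

#include <stdbool.h>
#include <time.h>

struct toy_inode {
        time_t mtime;
        long long size;
        bool dirty;
};

/* Mirrors file_update_time(): a no-op when the timestamp is unchanged. */
static void toy_update_time(struct toy_inode *ino)
{
        time_t now = time(NULL);

        if (now == ino->mtime)
                return;                 /* same second: nothing marked dirty */
        ino->mtime = now;
        ino->dirty = true;
}

static void toy_fallocate_grow(struct toy_inode *ino, long long new_size)
{
        ino->size = new_size;
        toy_update_time(ino);
        ino->dirty = true;              /* the fix: dirty the inode explicitly */
}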
fs/gfs2/glock.c
@@ -246,8 +246,8 @@ static inline void do_error(struct gfs2_glock *gl, const int ret)
  */

 static int do_promote(struct gfs2_glock *gl)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
         const struct gfs2_glock_operations *glops = gl->gl_ops;
         struct gfs2_holder *gh, *tmp;
@@ -260,10 +260,10 @@ __acquires(&gl->gl_spin)
                 if (may_grant(gl, gh)) {
                         if (gh->gh_list.prev == &gl->gl_holders &&
                             glops->go_lock) {
-                                spin_unlock(&gl->gl_spin);
+                                spin_unlock(&gl->gl_lockref.lock);
                                 /* FIXME: eliminate this eventually */
                                 ret = glops->go_lock(gh);
-                                spin_lock(&gl->gl_spin);
+                                spin_lock(&gl->gl_lockref.lock);
                                 if (ret) {
                                         if (ret == 1)
                                                 return 2;
@@ -361,7 +361,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
         unsigned state = ret & LM_OUT_ST_MASK;
         int rv;

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         trace_gfs2_glock_state_change(gl, state);
         state_change(gl, state);
         gh = find_first_waiter(gl);
@@ -405,7 +405,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
                         pr_err("wanted %u got %u\n", gl->gl_target, state);
                         GLOCK_BUG_ON(gl, 1);
                 }
-                spin_unlock(&gl->gl_spin);
+                spin_unlock(&gl->gl_lockref.lock);
                 return;
         }
@@ -414,9 +414,9 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
         gfs2_demote_wake(gl);
         if (state != LM_ST_UNLOCKED) {
                 if (glops->go_xmote_bh) {
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         rv = glops->go_xmote_bh(gl, gh);
-                        spin_lock(&gl->gl_spin);
+                        spin_lock(&gl->gl_lockref.lock);
                         if (rv) {
                                 do_error(gl, rv);
                                 goto out;
@@ -429,7 +429,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 out:
         clear_bit(GLF_LOCK, &gl->gl_flags);
 out_locked:
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
 }

 /**
@@ -441,8 +441,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
  */

 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
         const struct gfs2_glock_operations *glops = gl->gl_ops;
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -464,7 +464,7 @@ __acquires(&gl->gl_spin)
             (gl->gl_state == LM_ST_EXCLUSIVE) ||
             (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
                 clear_bit(GLF_BLOCKING, &gl->gl_flags);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (glops->go_sync)
                 glops->go_sync(gl);
         if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
@@ -485,7 +485,7 @@ __acquires(&gl->gl_spin)
                         gfs2_glock_put(gl);
         }

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
 }

 /**
@@ -513,8 +513,8 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
  */

 static void run_queue(struct gfs2_glock *gl, const int nonblock)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
         struct gfs2_holder *gh = NULL;
         int ret;
@@ -596,7 +596,7 @@ static void glock_work_func(struct work_struct *work)
                 finish_xmote(gl, gl->gl_reply);
                 drop_ref = 1;
         }
-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
             gl->gl_state != LM_ST_UNLOCKED &&
             gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -612,7 +612,7 @@ static void glock_work_func(struct work_struct *work)
                 }
         }
         run_queue(gl, 0);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (!delay)
                 gfs2_glock_put(gl);
         else {
@@ -876,8 +876,8 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
  */

 static inline void add_to_queue(struct gfs2_holder *gh)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
         struct gfs2_glock *gl = gh->gh_gl;
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -926,10 +926,10 @@ __acquires(&gl->gl_spin)
 do_cancel:
         gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
         if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
-                spin_unlock(&gl->gl_spin);
+                spin_unlock(&gl->gl_lockref.lock);
                 if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                         sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
-                spin_lock(&gl->gl_spin);
+                spin_lock(&gl->gl_lockref.lock);
         }
         return;
@@ -967,7 +967,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
         if (test_bit(GLF_LRU, &gl->gl_flags))
                 gfs2_glock_remove_from_lru(gl);

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         add_to_queue(gh);
         if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
                      test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
@@ -977,7 +977,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
                 gl->gl_lockref.count--;
         }
         run_queue(gl, 1);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);

         if (!(gh->gh_flags & GL_ASYNC))
                 error = gfs2_glock_wait(gh);
@@ -1010,7 +1010,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
         unsigned delay = 0;
         int fast_path = 0;

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         if (gh->gh_flags & GL_NOCACHE)
                 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1018,9 +1018,9 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
         if (find_first_holder(gl) == NULL) {
                 if (glops->go_unlock) {
                         GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         glops->go_unlock(gh);
-                        spin_lock(&gl->gl_spin);
+                        spin_lock(&gl->gl_lockref.lock);
                         clear_bit(GLF_LOCK, &gl->gl_flags);
                 }
                 if (list_empty(&gl->gl_holders) &&
gfs2_glock_add_to_lru(gl);
trace_gfs2_glock_queue(gh, 0);
spin_unlock(&gl->gl_spin);
spin_unlock(&gl->gl_lockref.lock);
if (likely(fast_path))
return;
@@ -1217,9 +1217,9 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
                         delay = gl->gl_hold_time;
         }

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         handle_callback(gl, state, delay, true);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                 gfs2_glock_put(gl);
 }
@@ -1259,7 +1259,7 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
  *
- * The gl_reply field is under the gl_spin lock so that it is ok
+ * The gl_reply field is under the gl_lockref.lock lock so that it is ok
  * to use a bitfield shared with other glock state fields.
  */
@@ -1267,20 +1267,20 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
         struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         gl->gl_reply = ret;

         if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
                 if (gfs2_should_freeze(gl)) {
                         set_bit(GLF_FROZEN, &gl->gl_flags);
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         return;
                 }
         }

         gl->gl_lockref.count++;
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
@@ -1326,14 +1326,14 @@ __acquires(&lru_lock)
         while(!list_empty(list)) {
                 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                 list_del_init(&gl->gl_lru);
-                if (!spin_trylock(&gl->gl_spin)) {
+                if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
                         list_add(&gl->gl_lru, &lru_list);
                         atomic_inc(&lru_count);
                         continue;
                 }
                 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         goto add_back_to_lru;
                 }
                 clear_bit(GLF_LRU, &gl->gl_flags);
@@ -1343,7 +1343,7 @@ __acquires(&lru_lock)
                 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                         gl->gl_lockref.count--;
-                spin_unlock(&gl->gl_spin);
+                spin_unlock(&gl->gl_lockref.lock);
                 cond_resched_lock(&lru_lock);
         }
 }
@@ -1461,10 +1461,10 @@ static void clear_glock(struct gfs2_glock *gl)
 {
         gfs2_glock_remove_from_lru(gl);

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         if (gl->gl_state != LM_ST_UNLOCKED)
                 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
 }
@@ -1482,9 +1482,9 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)

 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
 {
-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         gfs2_dump_glock(seq, gl);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
 }

 static void dump_glock_func(struct gfs2_glock *gl)
@@ -1518,10 +1518,10 @@ void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
         ret = gfs2_truncatei_resume(ip);
         gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         clear_bit(GLF_LOCK, &gl->gl_flags);
         run_queue(gl, 1);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
 }

 static const char *state2str(unsigned state)
fs/gfs2/glock.h
@@ -141,7 +141,7 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
         struct pid *pid;

         /* Look in glock's list of holders for one with current task as owner */
-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         pid = task_pid(current);
         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
@@ -151,7 +151,7 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
         }
         gh = NULL;
 out:
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);

         return gh;
 }
fs/gfs2/glops.c
@@ -146,11 +146,11 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
         struct gfs2_rgrpd *rgd;
         int error;

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         rgd = gl->gl_object;
         if (rgd)
                 gfs2_rgrp_brelse(rgd);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);

         if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                 return;
@@ -162,11 +162,11 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
         mapping_set_error(mapping, error);
         gfs2_ail_empty_gl(gl);

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         rgd = gl->gl_object;
         if (rgd)
                 gfs2_free_clones(rgd);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
 }

 /**
@@ -542,7 +542,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
  * iopen_go_callback - schedule the dcache entry for the inode to be deleted
  * @gl: the glock
  *
- * gl_spin lock is held while calling this
+ * gl_lockref.lock lock is held while calling this
  */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
fs/gfs2/incore.h
@@ -334,9 +334,8 @@ struct gfs2_glock {
         struct lm_lockname gl_name;
         struct lockref gl_lockref;
-#define gl_spin gl_lockref.lock

-        /* State fields protected by gl_spin */
+        /* State fields protected by gl_lockref.lock */
         unsigned int gl_state:2,        /* Current state */
                      gl_target:2,       /* Target state */
                      gl_demote_state:2, /* State requested by remote node */
fs/gfs2/main.c
@@ -50,7 +50,7 @@ static void gfs2_init_glock_once(void *foo)
         struct gfs2_glock *gl = foo;

         INIT_HLIST_BL_NODE(&gl->gl_list);
-        spin_lock_init(&gl->gl_spin);
+        spin_lock_init(&gl->gl_lockref.lock);
         INIT_LIST_HEAD(&gl->gl_holders);
         INIT_LIST_HEAD(&gl->gl_lru);
         INIT_LIST_HEAD(&gl->gl_ail_list);
fs/gfs2/ops_fstype.c
@@ -1291,6 +1291,9 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
                 up_write(&s->s_umount);
                 blkdev_put(bdev, mode);
                 down_write(&s->s_umount);
+        } else {
+                /* s_mode must be set before deactivate_locked_super calls */
+                s->s_mode = mode;
         }

         memset(&args, 0, sizeof(args));
@@ -1314,7 +1317,6 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
         } else {
                 char b[BDEVNAME_SIZE];

-                s->s_mode = mode;
                 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                 sb_set_blocksize(s, block_size(bdev));
                 error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
fs/gfs2/rgrp.c
@@ -729,9 +729,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
                 rb_erase(n, &sdp->sd_rindex_tree);

                 if (gl) {
-                        spin_lock(&gl->gl_spin);
+                        spin_lock(&gl->gl_lockref.lock);
                         gl->gl_object = NULL;
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         gfs2_glock_add_to_lru(gl);
                         gfs2_glock_put(gl);
                 }
@@ -933,8 +933,9 @@ static int read_rindex_entry(struct gfs2_inode *ip)
                 goto fail;

         rgd->rd_gl->gl_object = rgd;
-        rgd->rd_gl->gl_vm.start = rgd->rd_addr * bsize;
-        rgd->rd_gl->gl_vm.end = rgd->rd_gl->gl_vm.start + (rgd->rd_length * bsize) - 1;
+        rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
+        rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr +
+                                                  rgd->rd_length) * bsize) - 1;
         rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
         rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
         if (rgd->rd_data > sdp->sd_max_rg_data)
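Rounding gl_vm.start down (PAGE_CACHE_MASK) and gl_vm.end up (PAGE_CACHE_ALIGN) makes the glock's address-space range cover whole pages even when the filesystem block size is smaller than the page size; previously the range could start or end mid-page, leaving metadata in the uncovered part of a page incoherent. A worked example with made-up numbers (1k blocks, 4k pages; rd_addr and rd_length here are mine, not from the patch):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long bsize = 1024, rd_addr = 17, rd_length = 10;

        unsigned long old_start = rd_addr * bsize;                  /* 17408 */
        unsigned long old_end = old_start + rd_length * bsize - 1;  /* 27647 */
        unsigned long new_start = (rd_addr * bsize) & PAGE_MASK;    /* 16384 */
        unsigned long new_end =
                PAGE_ALIGN((rd_addr + rd_length) * bsize) - 1;      /* 28671 */

        printf("old range: [%lu, %lu] starts and ends mid-page\n",
               old_start, old_end);
        printf("new range: [%lu, %lu] covers whole pages\n",
               new_start, new_end);
        return 0;
}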
fs/gfs2/trans.c
@@ -176,6 +176,8 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
                 unlock_buffer(bh);
                 if (bh->b_private == NULL)
                         bd = gfs2_alloc_bufdata(gl, bh, &gfs2_databuf_lops);
+                else
+                        bd = bh->b_private;
                 lock_buffer(bh);
                 gfs2_log_lock(sdp);
         }
@@ -236,6 +238,8 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
                 lock_page(bh->b_page);
                 if (bh->b_private == NULL)
                         bd = gfs2_alloc_bufdata(gl, bh, &gfs2_buf_lops);
+                else
+                        bd = bh->b_private;
                 unlock_page(bh->b_page);
                 lock_buffer(bh);
                 gfs2_log_lock(sdp);
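Both hunks fix the same bug class: the function re-checks bh->b_private after dropping and re-acquiring its locks, but previously assigned bd only in the NULL branch. If another thread attached a bufdata during the unlocked window, bd kept its stale NULL value from the first read and a BUG() tripped later. A generic userspace sketch of the pattern (hypothetical names; error handling elided):

#include <pthread.h>
#include <stdlib.h>
#include <assert.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_slot;               /* plays the role of bh->b_private */

static void *get_or_alloc(void)
{
        void *bd = NULL;

        pthread_mutex_lock(&lock);
        if (shared_slot == NULL) {
                pthread_mutex_unlock(&lock);
                /* slow path: another thread may fill the slot here */
                pthread_mutex_lock(&lock);
                if (shared_slot == NULL)
                        shared_slot = bd = malloc(64);
                else
                        bd = shared_slot;  /* the missing else from the fix */
        } else {
                bd = shared_slot;
        }
        pthread_mutex_unlock(&lock);

        assert(bd != NULL);     /* without the else, this could trip */
        return bd;
}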